ngram — a list-valued column; list lengths range from 0 to 67.8k. The sample holds two rows: an empty list ([]), and a list of overlapping word-window fragments, all drawn from one Django migration file.
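For orientation, a minimal sketch of how that list-length summary could be recomputed. The record layout is an assumption (each record a dict with a list under "ngram"); it is not documented in this dump:

def ngram_length_range(rows):
    # rows: hypothetical iterable of records, each carrying a list-valued
    # "ngram" field. Returns (min, max) list length across the dataset,
    # summarized above as 0 and ~67.8k.
    lengths = [len(row["ngram"]) for row in rows]
    return min(lengths), max(lengths)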
"('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url",
"models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)), ('candidate_url', models.URLField(null=True,",
"be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)), ('contest_level1',",
"auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election",
"candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)),",
"email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id',",
"candidates who will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option",
"auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"verbose_name=b'google civic number of candidates who will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True,",
"null=True, verbose_name=b'google civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id',",
"null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot',",
"verbose_name=b'youtube url of candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign",
"campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('was_processed',",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ],",
"('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,",
"blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True,",
"id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google",
"null=True, verbose_name=b'google civic primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope',",
"email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False,",
"photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254,",
"candidates to vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates",
"serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True,",
"('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google",
"verbose_name=b'google civic internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id',",
"id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254,",
"vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we",
"verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)),",
"models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20,",
"-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations",
"to vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who",
"number of candidates to vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number",
"id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website",
"civic primary party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('was_processed',",
"verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),",
"fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle',",
"campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True,",
"vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)),",
"url of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign',",
"models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed', models.BooleanField(default=False,",
"url of candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email',",
"primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office',",
"verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),",
"temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254,",
"primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[",
"verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),",
"verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google",
"id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election",
"candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)), ('candidate_url',",
"of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)), ('email',",
"level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party',",
"('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary",
"utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration):",
"null=True, verbose_name=b'google civic level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level,",
"migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum",
"primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we",
"blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True,",
"option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254,",
"contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote",
"('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254,",
"serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google",
"verbose_name=b'we vote contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of",
"], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20,",
"name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter",
"party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is",
"verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True,",
"blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google",
"civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary",
"candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True,",
"null=True, verbose_name=b'google civic level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level,",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to vote for', blank=True)), ('number_elected', models.CharField(max_length=254,",
"election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254,",
"election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254,",
"civic candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel(",
"id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we",
"('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,",
"[ ] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),",
"district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('special', models.CharField(max_length=254,",
"name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')),",
"('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order",
"blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic",
"models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254,",
"verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254,",
"('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)),",
"null=True, verbose_name=b'google civic number of candidates to vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True,",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary",
"civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True,",
"null=True, verbose_name=b'google civic order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp",
"class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id',",
"verbose_name=b'google civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel(",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id',",
"models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote",
"party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id',",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')),",
"= [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google",
"models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign',",
"elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254,",
"election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)), ('we_vote_politician_id',",
"('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url",
"option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)), ('ballot_placement',",
"vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party',",
"civic candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email',",
"verbose_name=b'google civic primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254,",
"('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of",
"election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)), ('name', models.CharField(max_length=254,",
"civic number of candidates who will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google",
"ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('special', models.CharField(max_length=254, null=True,",
"civic primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google",
"models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id',",
"verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed', models.BooleanField(default=False, verbose_name=b'is",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)),",
"), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google",
"dependencies = [ ] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,",
"models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of",
"blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube",
"models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate",
"of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)), ('google_plus_url',",
"blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we",
"civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True,",
"null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ],",
"blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url",
"verbose_name=b'we vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)),",
"('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254,",
"primary party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False,",
"blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id',",
"civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic",
"verbose_name=b'google civic level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option",
"verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True,",
"blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google",
"election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254,",
"import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel(",
"('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id',",
"models.CharField(max_length=254, verbose_name=b'google civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party',",
"url of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)),",
"election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254,",
"blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')),",
"auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True,",
"id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election",
"models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,",
"url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election",
"null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party',",
"candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('phone',",
"civic candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True,",
"('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details",
"name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id',",
"of candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)),",
"blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google",
"verbose_name=b'website url of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign',",
"verbose_name=b'google civic level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement',",
"details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote",
"models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,",
"will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)),",
"('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)),",
"null=True, verbose_name=b'google civic primary party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party',",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal",
"verbose_name=b'google civic primary party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),",
"civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254,",
"vote election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google",
"of candidates who will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level,",
"civic level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)),",
"], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google",
"primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id',",
"subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic",
"name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd",
"civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)), ('google_civic_contest_office_id',",
"null=True, verbose_name=b'we vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement',",
"null=True, verbose_name=b'we vote candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician",
"election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),",
"null=True, verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id',",
"null=True, verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot',",
"civic level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1',",
"('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote",
"models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')),",
"verbose_name=b'google civic candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ),",
"blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to vote for', blank=True)),",
"null=True, verbose_name=b'we vote election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day',",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),",
"ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True,",
"primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[",
"verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),",
"models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), ]",
"null=True, verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254,",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election",
"url of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)),",
"district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google",
"('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')),",
"verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254,",
"contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to",
"serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic",
"office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to vote",
"('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,",
"migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[",
"placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google",
"= [ ] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,",
"verbose_name=b'google plus url of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate",
"primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')),",
"verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),",
"level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)),",
"civic level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2',",
"of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)),",
"models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of",
"id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook",
"('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary",
"migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic",
"('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic",
"id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254,",
"], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google",
"('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote",
"), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic",
"blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url",
"null=True, verbose_name=b'google civic number of candidates who will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254,",
"of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)), ('twitter_url',",
"civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('district_name',",
"name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ],",
"name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')),",
"vote contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates",
"models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election",
"('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id',",
"('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"__future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [",
"models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election",
"campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[",
"scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)), ('youtube_url',",
"null=True, verbose_name=b'google civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ),",
"verbose_name=b'google civic order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id",
"verbose_name=b'we vote politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)),",
"coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class",
"models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,",
"vote politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)), ('facebook_url',",
"plus url of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign',",
"id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google",
"('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),",
"verbose_name=b'google civic level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option",
"contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google",
"referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we",
"primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic",
"auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum",
"('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote",
"election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True,",
"election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)), ('number_voting_for',",
"civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google",
"import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ]",
"('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election",
"verbose_name=b'google civic number of candidates to vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google",
"), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic",
"('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ),",
"('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google",
"('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote",
"verbose_name=b'google civic candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign",
"('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of",
"verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)),",
"unique=True, null=True, verbose_name=b'we vote election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election name')),",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary",
"civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum',",
"civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)), ('name',",
"('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who will be elected', blank=True)),",
"civic internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)),",
"blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True,",
"election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True,",
"order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')), ('we_vote_contest_office_id',",
"ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('district_name', models.CharField(max_length=254,",
"id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254,",
"('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id',",
"number of candidates who will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')),",
"vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)),",
"('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google",
"option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)), ('contest_level2',",
"('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google",
"verbose_name=b'facebook url of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign',",
"of candidates to vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of",
"blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True,",
"office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we",
"verbose_name=b'we vote candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id',",
"civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('special',",
"models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate",
"('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we",
"-*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies",
"('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google",
"models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate",
"('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url',",
"models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate",
"from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies =",
"verbose_name=b'twitter url of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate",
"migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')),",
"verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id',",
"null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office",
"vote candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote politician id', blank=True)),",
"operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254,",
"party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google",
"blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')),",
"1', blank=True)), ('contest_level2', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254,",
"civic order on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')),",
"django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [",
"level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)),",
"('name', models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic election day')), ('was_processed',",
"vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who will",
"unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations",
"candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True,",
"('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254,",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who will be elected',",
"politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True,",
"null=True, verbose_name=b'we vote politician id', blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign',",
"('primary_party', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district",
"models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')), ('electorate_specifications',",
"verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('ballot_placement',",
"verbose_name=b'google civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254,",
"civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True,",
"('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic",
"id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True,",
"id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to vote for',",
"referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google",
"null=True, verbose_name=b'google civic level, option 2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot",
"models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest",
"civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id',",
"blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus",
"id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True,",
"primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title',",
"blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID',",
"blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign id', blank=True)), ('we_vote_politician_id', models.CharField(max_length=254, null=True,",
"blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is",
"blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice', fields=[ ('id', models.AutoField(verbose_name='ID',",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')),",
"civic number of candidates to vote for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')),",
"blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True,",
"blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestReferendum', fields=[ ('id', models.AutoField(verbose_name='ID',",
"verbose_name=b'we vote election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254,",
"id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election",
"verbose_name=b'we vote election id', blank=True)), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest office id',",
"from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations =",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"url of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)),",
"candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)), ('email', models.CharField(max_length=254,",
"blank=True)), ('candidate_url', models.URLField(null=True, verbose_name=b'website url of candidate campaign', blank=True)), ('facebook_url', models.URLField(null=True, verbose_name=b'facebook url",
"candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google",
"max_length=20, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id',",
"] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name',",
"primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id',",
"verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254,",
"null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote candidate campaign",
"0', blank=True)), ('contest_level1', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 1', blank=True)), ('contest_level2', models.CharField(max_length=254,",
"campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google plus url of candidate campaign', blank=True)), ('youtube_url', models.URLField(null=True,",
"models.CharField(max_length=254, verbose_name=b'google civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)),",
"fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254,",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic order on",
"title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum",
"('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True,",
"name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True, max_length=20, verbose_name=b'google civic election",
"election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('google_civic_election_id', models.CharField(unique=True,",
"models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)), ('photo_url',",
"party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection', fields=[ ('id',",
"who will be elected', blank=True)), ('contest_level0', models.CharField(max_length=254, null=True, verbose_name=b'google civic level, option 0',",
"blank=True)), ('special', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary",
"campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of candidate campaign', blank=True)), ('google_plus_url', models.URLField(null=True, verbose_name=b'google",
"blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we",
"migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate",
"models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id",
"verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254,",
"civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google civic election id', blank=True)), ('we_vote_election_id', models.CharField(max_length=254, null=True,",
"('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates to vote for', blank=True)), ('number_elected',",
"models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('office', models.CharField(max_length=254, verbose_name=b'google civic office')), ('google_civic_election_id', models.CharField(max_length=254, null=True, verbose_name=b'google",
"models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254, null=True,",
"('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('we_vote_candidate_campaign_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote",
"party', blank=True)), ('district_name', models.CharField(max_length=254, verbose_name=b'google civic district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district",
"models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254,",
"models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id', models.CharField(max_length=254, verbose_name=b'google election id')), ('we_vote_election_id',",
"candidate campaign email', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicContestOffice',",
"referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"civic primary party', blank=True)), ('was_processed', models.BooleanField(default=False, verbose_name=b'is primary election')), ], ), migrations.CreateModel( name='GoogleCivicElection',",
"for', blank=True)), ('number_elected', models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who will be",
"verbose_name=b'google civic district ocd id')), ('electorate_specifications', models.CharField(max_length=254, null=True, verbose_name=b'google civic primary party', blank=True)),",
"election id', blank=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic election name')), ('election_day', models.CharField(max_length=254, verbose_name=b'google civic",
"civic election id')), ('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254,",
"verbose_name=b'google civic referendum subtitle')), ('referendum_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic referendum details url')), ('google_civic_election_id',",
"on ballot', blank=True)), ('google_civic_contest_office_id', models.CharField(max_length=254, verbose_name=b'google civic internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254,",
"('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district ocd id')),",
"2', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot placement', blank=True)), ('primary_party', models.CharField(max_length=254, null=True,",
"Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID',",
"models.CharField(max_length=254, null=True, verbose_name=b'google civic number of candidates who will be elected', blank=True)), ('contest_level0',",
"district name')), ('district_scope', models.CharField(max_length=254, verbose_name=b'google civic district scope')), ('district_ocd_id', models.CharField(max_length=254, verbose_name=b'google civic district",
"primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party',",
"('party', models.CharField(max_length=254, null=True, verbose_name=b'google civic party', blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl',",
"('we_vote_election_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate",
"[ migrations.CreateModel( name='GoogleCivicCandidateCampaign', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic",
"fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=254, verbose_name=b'google civic candidate name')), ('party',",
"null=True, verbose_name=b'we vote contest office id', blank=True)), ('number_voting_for', models.CharField(max_length=254, null=True, verbose_name=b'google civic number",
"null=True, verbose_name=b'google civic candidate campaign email', blank=True)), ('phone', models.CharField(max_length=254, null=True, verbose_name=b'google civic candidate",
"blank=True)), ('photo_url', models.CharField(max_length=254, null=True, verbose_name=b'google civic photoUrl', blank=True)), ('order_on_ballot', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"internal temp contest_office_id id')), ('we_vote_contest_office_id', models.CharField(max_length=254, null=True, verbose_name=b'we vote contest_office_id id', blank=True)), ('google_civic_election_id',",
"('youtube_url', models.URLField(null=True, verbose_name=b'youtube url of candidate campaign', blank=True)), ('email', models.CharField(max_length=254, null=True, verbose_name=b'google civic",
"# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models,",
"models.CharField(max_length=254, null=True, verbose_name=b'we vote election id', blank=True)), ('ballot_placement', models.CharField(max_length=254, null=True, verbose_name=b'google civic ballot",
"('facebook_url', models.URLField(null=True, verbose_name=b'facebook url of candidate campaign', blank=True)), ('twitter_url', models.URLField(null=True, verbose_name=b'twitter url of",
"models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('referendum_title', models.CharField(max_length=254, verbose_name=b'google civic referendum title')), ('referendum_subtitle', models.CharField(max_length=254, verbose_name=b'google"
] |
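For orientation, each CreateModel above implies a matching model class. A minimal sketch of the GoogleCivicElection model behind this migration, inferred purely from the migration fields (the project's actual models.py is not part of this excerpt and may differ):

from django.db import models

class GoogleCivicElection(models.Model):
    # Field definitions mirror the migration above.
    google_civic_election_id = models.CharField(unique=True, max_length=20, verbose_name=b'google civic election id')
    we_vote_election_id = models.CharField(max_length=20, unique=True, null=True, verbose_name=b'we vote election id', blank=True)
    name = models.CharField(max_length=254, verbose_name=b'google civic election name')
    election_day = models.CharField(max_length=254, verbose_name=b'google civic election day')
    was_processed = models.BooleanField(default=False, verbose_name=b'is primary election')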
def get_provider_info():
    return {
        "package-name": "airflow-providers-aliyun-rocketmq",
        "name": "Aliyun RocketMQ Airflow Provider",
        "description": "Airflow provider for aliyun rocketmq",
        "hook-class-names": ["aliyun_rocketmq_provider.hooks.aliyun_rocketmq.AliyunRocketMQHook"],
        "versions": ["0.1.2"]
    }
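Airflow 2.x discovers provider metadata like the dictionary above through the apache_airflow_provider entry point, which must resolve to a callable returning that dictionary. A minimal packaging sketch; the module path aliyun_rocketmq_provider.get_provider_info is an assumption based on the package name, not taken from the excerpt:

from setuptools import setup, find_packages

setup(
    name="airflow-providers-aliyun-rocketmq",
    version="0.1.2",
    packages=find_packages(),
    entry_points={
        "apache_airflow_provider": [
            # Group and key follow Airflow's provider-discovery convention;
            # the module path is assumed for illustration.
            "provider_info=aliyun_rocketmq_provider.get_provider_info:get_provider_info"
        ]
    },
)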
from bs4 import BeautifulSoup
import urllib

#sets the URLs
h1 = "test_eng.html"
h2 = "test2_eng.html"

# need to either figure out how to skip "None" results or turn search_all into a string
def headliner(url):
    soup = BeautifulSoup((open(url)), "lxml")
    head1 = soup.find_all(['h1','h2','h3'])
    head2 = soup.h2.string
    head3 = soup.h3.string
    print head1
    print head1[0].get_text()
    #print head1[1].get_text()
    #print head2[2].get_text()
    #print head2
    #print head3
    print ""

headliner(h1)
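The script above is Python 2 (print statements) and leaves its own TODO open: skipping None results. A minimal Python 3 sketch of the same headline dump with that fix; file names and tag list are kept from the original, and beautifulsoup4 plus lxml must be installed:

from bs4 import BeautifulSoup

def headliner(url):
    with open(url) as f:
        soup = BeautifulSoup(f, "lxml")
    # find_all never yields None, but .string on a tag can be None when the
    # tag contains nested markup, so fall back to get_text() and skip empties.
    for tag in soup.find_all(['h1', 'h2', 'h3']):
        text = tag.string or tag.get_text()
        if text and text.strip():
            print(text.strip())
    print("")

headliner("test_eng.html")
headliner("test2_eng.html")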
# setup.py
import os
import sys

from setuptools import setup
from setuptools.command.install import install

sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
from alias import ALIASES_DIR_VAR  # noqa: E402
from alias import get_aliases_dir  # noqa: E402


def set_env_var(var, value):
    os.system(f'setx {var} "{value}"')


class PostInstallCommand(install):
    """Post-installation for installation mode."""
    def run(self):
        install.run(self)
        if ALIASES_DIR_VAR not in os.environ:
            aliases_dir = get_aliases_dir()
            set_env_var(ALIASES_DIR_VAR, aliases_dir)


setup(
    cmdclass={
        'install': PostInstallCommand,
    }
)
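Note that setx exists only on Windows, where it persists the variable for future shells; on other platforms the post-install hook above would fail silently inside os.system. A small standard-library guard one might add (a sketch, not part of the original file):

import os
import platform
import subprocess

def set_env_var(var, value):
    if platform.system() == "Windows":
        # setx persists the variable for future processes on Windows.
        subprocess.run(["setx", var, value], check=False)
    else:
        # No portable equivalent of setx; set it for this process only.
        # A real installer might append an export line to a shell profile.
        os.environ[var] = value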
[
"1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0] ==",
"patch from nose.tools import eq_ from buoy.client.network import ip class MockResponse: def __init__(self,",
"\"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0] == service_ok: mock_resp =",
"def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0] == service_ok: mock_resp = MockResponse(content=ip_expected,",
"mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services)",
"= 1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0]",
"len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services) eq_(mock_method.call_count,",
"unittest from unittest.mock import patch from nose.tools import eq_ from buoy.client.network import ip",
"nose.tools import eq_ from buoy.client.network import ip class MockResponse: def __init__(self, **kwargs): self.content",
"== service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services))",
"from nose.tools import eq_ from buoy.client.network import ip class MockResponse: def __init__(self, **kwargs):",
"ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0] == service_ok:",
"'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts = len(self.services) ip_expected",
"= len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services)",
"ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def mocked_requests_get(*args,",
"eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def",
"= MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip,",
"mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0]",
"MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get')",
"= self.services[0] max_attempts = 1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp =",
"= mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok =",
"setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok",
"MockResponse: def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404) class",
"mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method):",
"@patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts = 1 ip_expected =",
"max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts = 1 ip_expected",
"import ip class MockResponse: def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code =",
"ip class MockResponse: def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code',",
"MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services) eq_(mock_method.call_count, max_attempts) if __name__ == '__main__':",
"mock_method): service_ok = self.services[0] max_attempts = 1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs):",
"mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts",
"= len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0]",
"eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs):",
"self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org',",
"= kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw']",
"'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect",
"eq_ from buoy.client.network import ip class MockResponse: def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content',",
"\"\")) self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com',",
"'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts =",
"eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts = 1",
"str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com',",
"'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts = len(self.services) ip_expected = \"172.16.58.3\"",
"**kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services) eq_(mock_method.call_count, max_attempts) if __name__",
"def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method):",
"from buoy.client.network import ip class MockResponse: def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\"))",
"def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase):",
"status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def",
"import eq_ from buoy.client.network import ip class MockResponse: def __init__(self, **kwargs): self.content =",
"mock_resp = MockResponse() if args[0] == service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp",
"ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts =",
"class TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def",
"self.services[-1] max_attempts = len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse()",
"from unittest.mock import patch from nose.tools import eq_ from buoy.client.network import ip class",
"self.services[0] max_attempts = 1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse()",
"class MockResponse: def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404)",
"test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get",
"= MockResponse() if args[0] == service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect",
"= mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts =",
"mock_method): service_ok = self.services[-1] max_attempts = len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs):",
"TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self,",
"unittest.mock import patch from nose.tools import eq_ from buoy.client.network import ip class MockResponse:",
"test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts = len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args,",
"service_ok = self.services[-1] max_attempts = len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp",
"mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services) eq_(mock_method.call_count, max_attempts) if",
"mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0] == service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200)",
"@patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts = len(self.services) ip_expected =",
"max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs): return",
"max_attempts = 1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if",
"mock_method): max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException,",
"max_attempts = len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if",
"mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts)",
"self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok =",
"mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok",
"if args[0] == service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get",
"**kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self):",
"return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services) eq_(mock_method.call_count, max_attempts) if __name__ ==",
"= ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1]",
"def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts = len(self.services) ip_expected = \"172.16.58.3\" def",
"def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect =",
"['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts",
"= \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0] == service_ok: mock_resp",
"return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self,",
"__init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def",
"return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self,",
"import unittest from unittest.mock import patch from nose.tools import eq_ from buoy.client.network import",
"MockResponse() if args[0] == service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect =",
"mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method):",
"def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts = 1 ip_expected = \"172.16.58.3\" def",
"service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count,",
"self.content = str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services",
"mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services) eq_(mock_method.call_count, max_attempts) if __name__ == '__main__': unittest.main()",
"import patch from nose.tools import eq_ from buoy.client.network import ip class MockResponse: def",
"404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get')",
"'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_last_service(self, mock_method): service_ok = self.services[-1] max_attempts = len(self.services)",
"buoy.client.network import ip class MockResponse: def __init__(self, **kwargs): self.content = str.encode(kwargs.pop('content', \"\")) self.status_code",
"def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip, services=self.services) eq_(mock_method.call_count, max_attempts)",
"@patch.object(ip, 'get') def test_get_public_ip_return_exception(self, mock_method): max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse()",
"**kwargs): mock_resp = MockResponse() if args[0] == service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return",
"'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts = 1 ip_expected = \"172.16.58.3\"",
"max_attempts = len(self.services) def mocked_requests_get(*args, **kwargs): return MockResponse() mock_method.side_effect = mocked_requests_get self.assertRaises(ip.NoIPException, ip.get_public_ip,",
"= self.services[-1] max_attempts = len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp =",
"len(self.services) ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp = MockResponse() if args[0] ==",
"test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts = 1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args,",
"args[0] == service_ok: mock_resp = MockResponse(content=ip_expected, status_code=200) return mock_resp mock_method.side_effect = mocked_requests_get eq_(ip_expected,",
"service_ok = self.services[0] max_attempts = 1 ip_expected = \"172.16.58.3\" def mocked_requests_get(*args, **kwargs): mock_resp",
"kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services = ['http://icanhazip.com', 'http://ipv4bot.whatismyipaddress.com', 'https://api.ipify1.org', 'http://ip.42.pl/raw'] @patch.object(ip,",
"= str.encode(kwargs.pop('content', \"\")) self.status_code = kwargs.pop('status_code', 404) class TestPublicIP(unittest.TestCase): def setUp(self): self.services =",
"eq_(ip_expected, ip.get_public_ip(services=self.services)) eq_(mock_method.call_count, max_attempts) @patch.object(ip, 'get') def test_get_public_ip_return_ip_in_first_service(self, mock_method): service_ok = self.services[0] max_attempts"
] |
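Taken together, the three tests pin down the contract of ip.get_public_ip: query each service in order through the module-level get, return the decoded body of the first 200 response, and raise ip.NoIPException once every service has failed. Below is a minimal sketch of that contract, assuming get wraps requests.get; the real buoy.client.network.ip module may differ in details such as timeouts, retries, and error handling.

import requests


def get(url, timeout=5):
    # Thin wrapper the tests rely on: patch.object(ip, 'get') swaps this out.
    return requests.get(url, timeout=timeout)


class NoIPException(Exception):
    pass


def get_public_ip(services):
    # Try each service in order; the first HTTP 200 wins.
    for service in services:
        response = get(service)
        if response.status_code == 200:
            return response.content.decode().strip()
    raise NoIPException('no service returned a public IP')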
# muratcancicek/pointer_head
from .MappingFunctions import Boundary, StaticMapping, DynamicMapping
from datetime import datetime
from ... import Paths
import numpy as np
import cv2
import os


class InputEstimationVisualizer(object):
    def __init__(self, sceneScale = 1, landmarkColorStr = '#00ff00', screenColorStr = '#0000ff'):
        super().__init__()
        self._size = (1280, 720)

    def addBox(self, frame, pPts):
        # Draw the projected head box; with more than four points, also
        # connect the corresponding front and back corners.
        color = (255, 255, 255)
        cv2.polylines(frame, [pPts], True, color, 2, cv2.LINE_AA)
        if len(pPts) > 4:
            _pPts = []
            for start, end in [(1, 6), (2, 7), (3, 8)]:
                p = (tuple(pPts[start]), tuple(pPts[end]))
                _pPts.append(p)
            for start, end in _pPts:
                cv2.line(frame, start, end, color, 2, cv2.LINE_AA)
        return frame

    def addLandmarks(self, frame, landmarks, c = (255, 0, 0)):
        for i, (x, y) in enumerate(landmarks):
            #if not i in [39, 42]:
            #    continue
            cv2.circle(frame, (x, y), 6, c, -1, cv2.LINE_AA)
        return frame

    def addPointer(self, frame, outputValues):
        # Map the estimated screen coordinates onto the preview frame.
        #boundaries = self._mappingFunc.getOutputBoundaries()
        outputSize = (1920, 1080)
        outputValues[0] = outputSize[0] - outputValues[0]
        boundaries = Boundary(0, outputSize[0], 0, outputSize[1])
        (height, width, depth) = frame.shape
        (xRange, yRange, _) = boundaries.getRanges()
        if xRange != width or yRange != height:
            xRange, yRange = boundaries.getVolumeAbsRatio(outputValues)
            x, y = int(xRange*width), int(yRange*height)
        else:
            x, y = outputValues[:2].astype(int)
        cv2.circle(frame, (x, y), 1, (0, 0, 235), 56, cv2.LINE_AA)
        return frame

    def addAllInputs(self, frame, pPts = None, landmarks = None, outputValues = None):
        if not landmarks is None:
            frame = self.addLandmarks(frame, landmarks.astype(int))
        if not pPts is None:
            frame = self.addBox(frame, pPts.astype(int))
        if not outputValues is None:
            frame = self.addPointer(frame, outputValues.astype(int))
        return frame

    def showFrame(self, frame, delay = 1):
        cv2.imshow('frame', frame)
        k = cv2.waitKey(delay)
        if k == 27 or k == ord('q'):
            return False
        else:
            return True

    def __addText(self, frame, text, pos, color, largeScale = True):
        if largeScale:
            cv2.putText(frame, text, pos, cv2.FONT_HERSHEY_SIMPLEX, 2, color, thickness=8)
        else:
            cv2.putText(frame, text, pos, cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, thickness=1)
        return frame

    def _addValuesLineByLine(self, frame, values, labels, position, colors, largeScale = True):
        for v, l, c in zip(values, labels, colors):
            text = "{:s}: {:7.2f}".format(l, float(v))
            frame = self.__addText(frame, text, position, c, largeScale)
            position = (position[0], position[1]+(70 if largeScale else 30))
        return frame

    def _addValues(self, inputValues, frame, pos = (20, 60), prefix = '', largeScale = True):
        labels = [prefix+l for l in ['X', 'Y', 'Z']]
        g = 0 if largeScale else 200
        colors = ((0, 0, 255), (0, 255, 0), (255, g, 0))
        return self._addValuesLineByLine(frame, inputValues, labels, pos, colors, largeScale)

    def _addMeasurements(self, inputValues, pose, frame, largeScale = True):
        # Overlay head position, head orientation, and gaze values in three
        # stacked blocks.
        initialPos, gap = ((20, 60), 200) if largeScale else ((20, 30), 90)
        frame = self._addValues(pose[:3], frame, pos = initialPos, prefix = 'Pos', largeScale = largeScale)
        initialPos = (initialPos[0], initialPos[1] + gap)
        frame = self._addValues(pose[3:], frame, pos = initialPos, prefix = 'Or', largeScale = largeScale)
        initialPos = (initialPos[0], initialPos[1] + gap)
        frame = self._addValues(inputValues, frame, pos = initialPos, prefix = 'Gaze', largeScale = largeScale)
        return frame

    def showFrameWithAllInputs(self, frame, pPts = None, landmarks = None, outputValues = None, pose = None, delay = 1):
        frame = self.addAllInputs(frame, pPts, landmarks, outputValues)
        h, w, _ = frame.shape
        cv2.line(frame, (0, int(h/2)), (w, int(h/2)), (0, 0, 0), 5)
        cv2.line(frame, (int(w/2), 0), (int(w/2), h), (0, 0, 0), 5)
        if not pose is None:
            frame = self._addMeasurements(outputValues, pose, frame)
        return self.showFrame(frame, delay)

    def playSubjectVideoWithLandmarks(self, estimator, streamer):
        self._estimator = estimator
        for frame in streamer:
            landmarks = estimator.detectFacialLandmarks(frame)
            k = self.showFrameWithAllInputs(frame, landmarks = landmarks)
            if not k:
                break
        return

    def playSubjectVideoWithAllInputs(self, estimator, streamer):
        self._estimator = estimator
        for frame in streamer:
            annotations = \
                estimator.estimateInputValuesWithAnnotations(frame)
            pose = estimator.getHeadPose()
            inputValues, pPts, landmarks = annotations
            k = self.showFrameWithAllInputs(frame, pPts, landmarks, inputValues, pose)
            if not k:
                break
        return

    def replaySubjectVideoWithPostData(self, postData, streamer):
        if not isinstance(postData, tuple):
            jointStreamer = zip(postData, streamer)
            for landmarks, frame in jointStreamer:
                k = self.showFrameWithAllInputs(frame, landmarks = landmarks)
                if not k:
                    break
            return
        else:
            jointStreamer = zip(*(postData + (streamer,)))
            for headGaze, pose, landmarks, pPts, frame in jointStreamer:
                k = self.showFrameWithAllInputs(frame, pPts, landmarks, headGaze, pose)
                if not k:
                    break
            return

    def __initializeRecorder(self, id, trailName, fps = 30, dims = (1280, 720)):
        fourcc = cv2.VideoWriter_fourcc(*'MP42')
        dir = Paths.MergedVideosFolder + ('%s%s' % (id, Paths.sep))
        if not os.path.isdir(dir):
            os.makedirs(dir, exist_ok = True)
        now = str(datetime.now())[:-7].replace(':', '-').replace(' ', '_')
        recordName = trailName + '_%s_%s_WithAllInput.avi' % (id, now)
        print(dir + recordName, 'will be written')
        print(dims)
        return cv2.VideoWriter(dir + recordName, fourcc, fps, dims)

    def recordSubjectVideoWithPostdata(self, postData, id, trailName, streamer):
        recorder = self.__initializeRecorder(id, trailName, dims = (1280, 720))
        print(self._size)
        if not isinstance(postData, tuple):
            jointStreamer = zip(postData, streamer)
            for landmarks, frame in jointStreamer:
                frame = self.addAllInputs(frame, landmarks = landmarks)
                k = self.showFrame(frame)
                if not k:
                    recorder.release()
                    break
                recorder.write(frame.astype(np.uint8))
        else:
            jointStreamer = zip(*(postData + (streamer,)))
            for headGaze, pose, landmarks, pPts, frame in jointStreamer:
                # The source reads addAllInputs(frame, pPts, landmarks, outputValues),
                # but no outputValues is bound in this scope; headGaze is the matching
                # loop variable, so it is assumed here.
                frame = self.addAllInputs(frame, pPts, landmarks, headGaze)
                k = self.showFrame(frame)
                if not k:
                    recorder.release()
                    break
                recorder.write(frame.astype(np.uint8))
        recorder.release()
        return
"return False else: return True def __addText(self, frame, text, pos, color, largeScale =",
"frame in jointStreamer: frame = self.addAllInputs(frame, pPts, landmarks, outputValues) k = self.showFrame(frame) if",
"if not k: break return def replaySubjectVideoWithPostData(self, postData, streamer): if not isinstance(postData, tuple):",
"def addBox(self, frame, pPts): color = (255, 255, 255) cv2.polylines(frame, [pPts], True, color,",
"0 if largeScale else 200 colors = ((0, 0, 255), (0, 255, 0),",
"0), (255, g, 0)) return self._addValuesLineByLine(frame, inputValues, labels, pos, colors, largeScale) def _addMeasurements(self,",
"_) = boundaries.getRanges() if xRange != width or yRange != height: xRange, yRange",
"colors, largeScale = True): for v, l, c in zip(values, labels, colors): text",
"def __addText(self, frame, text, pos, color, largeScale = True): if largeScale: cv2.putText(frame, text,",
"= (initialPos[0], initialPos[1] + gap) frame = self._addValues(pose[3:], frame, pos =initialPos, prefix =",
"'Gaze', largeScale = largeScale) return frame def showFrameWithAllInputs(self, frame, pPts = None, landmarks",
"import cv2 import os class InputEstimationVisualizer(object): def __init__(self, sceneScale = 1, landmarkColorStr =",
"fps = 30, dims = (1280, 720)): fourcc = cv2.VideoWriter_fourcc(*'MP42') dir = Paths.MergedVideosFolder",
"not k: break return def replaySubjectVideoWithPostData(self, postData, streamer): if not isinstance(postData, tuple): jointStreamer",
"pose, frame, largeScale = True): initialPos, gap = ((20, 60), 200) if largeScale",
"in [(1,6), (2, 7), (3, 8)]: p = (tuple(pPts[start]), tuple(pPts[end])) _pPts.append(p) for start,",
"landmarks, pPts, frame in jointStreamer: k = self.showFrameWithAllInputs(frame, pPts, landmarks, headGaze, pose) if",
"g = 0 if largeScale else 200 colors = ((0, 0, 255), (0,",
"for start, end in [(1,6), (2, 7), (3, 8)]: p = (tuple(pPts[start]), tuple(pPts[end]))",
"= 1): cv2.imshow('frame', frame) k = cv2.waitKey(delay) if k == 27 or k",
"+ ('%s%s' % (id, Paths.sep)) if not os.path.isdir(dir): os.makedirs(dir, exist_ok = True) now",
"= Boundary(0, outputSize[0], 0, outputSize[1]) (height, width, depth) = frame.shape (xRange, yRange, _)",
"for frame in streamer: annotations = \\ estimator.estimateInputValuesWithAnnotations(frame) pose = estimator.getHeadPose() inputValues, pPts,",
"255), (0, 255, 0), (255, g, 0)) return self._addValuesLineByLine(frame, inputValues, labels, pos, colors,",
"[39, 42]: # continue cv2.circle(frame, (x, y), 6, c, -1, cv2.LINE_AA) return frame",
"return self.showFrame(frame, delay) def playSubjectVideoWithLandmarks(self, estimator, streamer): self._estimator = estimator for frame in",
"= (position[0], position[1]+(70 if largeScale else 30)) return frame def _addValues(self, inputValues, frame,",
"_addValuesLineByLine(self, frame, values, labels, position, colors, largeScale = True): for v, l, c",
"= (20, 60), prefix = '', largeScale = True): labels = [prefix+l for",
"outputValues): #boundaries = self._mappingFunc.getOutputBoundaries() outputSize = (1920, 1080) outputValues[0] = outputSize[0] - outputValues[0]",
"= frame.shape cv2.line(frame, (0, int(h/2)), (w, int(h/2)), (0,0,0), 5) cv2.line(frame, (int(w/2), 0), (int(w/2),",
"headGaze, pose, landmarks, pPts, frame in jointStreamer: frame = self.addAllInputs(frame, pPts, landmarks, outputValues)",
"frame, pPts = None, landmarks = None, outputValues = None, pose = None,",
"tuple): jointStreamer = zip(postData, streamer) for landmarks, frame in jointStreamer: frame = self.addAllInputs(frame,",
"if xRange != width or yRange != height: xRange, yRange = boundaries.getVolumeAbsRatio(outputValues) x,",
"255) cv2.polylines(frame, [pPts], True, color, 2, cv2.LINE_AA) if len(pPts) > 4: _pPts =",
"self.__addText(frame, text, position, c, largeScale) position = (position[0], position[1]+(70 if largeScale else 30))",
"if k == 27 or k == ord('q'): return False else: return True",
"colors): text = \"{:s}: {:7.2f}\".format(l, float(v)) frame = self.__addText(frame, text, position, c, largeScale)",
"width or yRange != height: xRange, yRange = boundaries.getVolumeAbsRatio(outputValues) x, y = int(xRange*width),",
"landmarks) k = self.showFrame(frame) if not k: recorder.release() break recorder.write(frame.astype(np.uint8)) else: jointStreamer =",
"if largeScale else ((20, 30), 90) frame = self._addValues(pose[:3], frame, pos = initialPos,",
"None: frame = self.addPointer(frame, outputValues.astype(int)) return frame def showFrame(self, frame, delay = 1):",
"color, thickness=1) return frame def _addValuesLineByLine(self, frame, values, labels, position, colors, largeScale =",
"in _pPts: cv2.line(frame, start, end, color, 2, cv2.LINE_AA) return frame def addLandmarks(self, frame,",
"pos, colors, largeScale) def _addMeasurements(self, inputValues, pose, frame, largeScale = True): initialPos, gap",
"self.showFrame(frame, delay) def playSubjectVideoWithLandmarks(self, estimator, streamer): self._estimator = estimator for frame in streamer:",
"position[1]+(70 if largeScale else 30)) return frame def _addValues(self, inputValues, frame, pos =",
"initialPos = (initialPos[0], initialPos[1] + gap) frame = self._addValues(inputValues, frame, pos = initialPos,",
"(255, 0, 0)): for i, (x, y) in enumerate(landmarks): #if not i in",
"= 'Or', largeScale = largeScale) initialPos = (initialPos[0], initialPos[1] + gap) frame =",
"2, cv2.LINE_AA) return frame def addLandmarks(self, frame, landmarks, c = (255, 0, 0)):",
"playSubjectVideoWithAllInputs(self, estimator, streamer): self._estimator = estimator for frame in streamer: annotations = \\",
"self._mappingFunc.getOutputBoundaries() outputSize = (1920, 1080) outputValues[0] = outputSize[0] - outputValues[0] boundaries = Boundary(0,",
"isinstance(postData, tuple): jointStreamer = zip(postData, streamer) for landmarks, frame in jointStreamer: k =",
"def addAllInputs(self, frame, pPts = None, landmarks = None, outputValues = None): if",
"headGaze, pose, landmarks, pPts, frame in jointStreamer: k = self.showFrameWithAllInputs(frame, pPts, landmarks, headGaze,",
"labels, position, colors, largeScale = True): for v, l, c in zip(values, labels,",
"True, color, 2, cv2.LINE_AA) if len(pPts) > 4: _pPts = [] for start,",
"recordName, fourcc, fps, dims) def recordSubjectVideoWithPostdata(self, postData, id, trailName, streamer): recorder = self.__initializeRecorder(id,",
"for headGaze, pose, landmarks, pPts, frame in jointStreamer: frame = self.addAllInputs(frame, pPts, landmarks,",
"= landmarks) if not k: break return def playSubjectVideoWithAllInputs(self, estimator, streamer): self._estimator =",
"pPts, landmarks, outputValues) k = self.showFrame(frame) if not k: recorder.release() break recorder.write(frame.astype(np.uint8)) recorder.release()",
"cv2.LINE_AA) return frame def addAllInputs(self, frame, pPts = None, landmarks = None, outputValues",
"= self.showFrameWithAllInputs(frame, landmarks = landmarks) if not k: break return else: jointStreamer =",
"estimator.getHeadPose() inputValues, pPts, landmarks = annotations k = self.showFrameWithAllInputs(frame, pPts, landmarks, inputValues, pose)",
"= (initialPos[0], initialPos[1] + gap) frame = self._addValues(inputValues, frame, pos = initialPos, prefix",
"streamer) for landmarks, frame in jointStreamer: k = self.showFrameWithAllInputs(frame, landmarks = landmarks) if",
"cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, thickness=1) return frame def _addValuesLineByLine(self, frame, values, labels, position, colors,",
"frame = self.addAllInputs(frame, landmarks = landmarks) k = self.showFrame(frame) if not k: recorder.release()",
"i in [39, 42]: # continue cv2.circle(frame, (x, y), 6, c, -1, cv2.LINE_AA)",
"__addText(self, frame, text, pos, color, largeScale = True): if largeScale: cv2.putText(frame, text, pos,",
"labels, colors): text = \"{:s}: {:7.2f}\".format(l, float(v)) frame = self.__addText(frame, text, position, c,",
"= None): if not landmarks is None: frame = self.addLandmarks(frame, landmarks.astype(int)) if not",
"self.addBox(frame, pPts.astype(int)) if not outputValues is None: frame = self.addPointer(frame, outputValues.astype(int)) return frame",
"landmarkColorStr = '#00ff00', screenColorStr = '#0000ff'): super() self._size = (1280, 720) def addBox(self,",
"be written') print(dims) return cv2.VideoWriter(dir + recordName, fourcc, fps, dims) def recordSubjectVideoWithPostdata(self, postData,",
"200 colors = ((0, 0, 255), (0, 255, 0), (255, g, 0)) return",
"not isinstance(postData, tuple): jointStreamer = zip(postData, streamer) for landmarks, frame in jointStreamer: k",
"+ recordName, fourcc, fps, dims) def recordSubjectVideoWithPostdata(self, postData, id, trailName, streamer): recorder =",
"pos, color, largeScale = True): if largeScale: cv2.putText(frame, text, pos, cv2.FONT_HERSHEY_SIMPLEX, 2, color,",
"self.__initializeRecorder(id, trailName, dims = (1280, 720)) print(self._size) if not isinstance(postData, tuple): jointStreamer =",
"Paths.MergedVideosFolder + ('%s%s' % (id, Paths.sep)) if not os.path.isdir(dir): os.makedirs(dir, exist_ok = True)",
"landmarks = None, outputValues = None): if not landmarks is None: frame =",
"zip(values, labels, colors): text = \"{:s}: {:7.2f}\".format(l, float(v)) frame = self.__addText(frame, text, position,",
"cv2.LINE_AA) return frame def addPointer(self, frame, outputValues): #boundaries = self._mappingFunc.getOutputBoundaries() outputSize = (1920,",
"landmarks) if not k: break return else: jointStreamer = zip(*(postData + (streamer,))) for",
"True) now = str(datetime.now())[:-7].replace(':', '-').replace(' ', '_') recordName = trailName + '_%s_%s_WithAllInput.avi' %",
"'#0000ff'): super() self._size = (1280, 720) def addBox(self, frame, pPts): color = (255,",
"= 1, landmarkColorStr = '#00ff00', screenColorStr = '#0000ff'): super() self._size = (1280, 720)",
"720)) print(self._size) if not isinstance(postData, tuple): jointStreamer = zip(postData, streamer) for landmarks, frame",
"initialPos, prefix = 'Gaze', largeScale = largeScale) return frame def showFrameWithAllInputs(self, frame, pPts",
"pose = None, delay = 1): frame = self.addAllInputs(frame, pPts, landmarks, outputValues) h,",
"outputValues is None: frame = self.addPointer(frame, outputValues.astype(int)) return frame def showFrame(self, frame, delay",
"frame = self.addAllInputs(frame, pPts, landmarks, outputValues) k = self.showFrame(frame) if not k: recorder.release()",
"else 200 colors = ((0, 0, 255), (0, 255, 0), (255, g, 0))",
"if len(pPts) > 4: _pPts = [] for start, end in [(1,6), (2,",
"if largeScale else 30)) return frame def _addValues(self, inputValues, frame, pos = (20,",
"in streamer: landmarks = estimator.detectFacialLandmarks(frame) k = self.showFrameWithAllInputs(frame, landmarks = landmarks) if not",
"+ '_%s_%s_WithAllInput.avi' % (id, now) print(dir + recordName, 'will be written') print(dims) return",
"outputSize = (1920, 1080) outputValues[0] = outputSize[0] - outputValues[0] boundaries = Boundary(0, outputSize[0],",
"pPts, landmarks = annotations k = self.showFrameWithAllInputs(frame, pPts, landmarks, inputValues, pose) if not",
"= (tuple(pPts[start]), tuple(pPts[end])) _pPts.append(p) for start, end in _pPts: cv2.line(frame, start, end, color,",
"outputValues[:2].astype(int) cv2.circle(frame, (x, y), 1, (0, 0, 235), 56, cv2.LINE_AA) return frame def",
"from datetime import datetime from ... import Paths import numpy as np import",
"id, trailName, fps = 30, dims = (1280, 720)): fourcc = cv2.VideoWriter_fourcc(*'MP42') dir",
"0), (int(w/2), h), (0,0,0), 5) if not pose is None: frame = self._addMeasurements(outputValues,",
"largeScale else 200 colors = ((0, 0, 255), (0, 255, 0), (255, g,",
"= self.showFrame(frame) if not k: recorder.release() break recorder.write(frame.astype(np.uint8)) else: jointStreamer = zip(*(postData +",
"if not os.path.isdir(dir): os.makedirs(dir, exist_ok = True) now = str(datetime.now())[:-7].replace(':', '-').replace(' ', '_')",
"'Z']] g = 0 if largeScale else 200 colors = ((0, 0, 255),",
"fourcc = cv2.VideoWriter_fourcc(*'MP42') dir = Paths.MergedVideosFolder + ('%s%s' % (id, Paths.sep)) if not",
"def playSubjectVideoWithLandmarks(self, estimator, streamer): self._estimator = estimator for frame in streamer: landmarks =",
"cv2.circle(frame, (x, y), 1, (0, 0, 235), 56, cv2.LINE_AA) return frame def addAllInputs(self,",
"k == ord('q'): return False else: return True def __addText(self, frame, text, pos,",
"k = self.showFrameWithAllInputs(frame, landmarks = landmarks) if not k: break return def playSubjectVideoWithAllInputs(self,",
"import Paths import numpy as np import cv2 import os class InputEstimationVisualizer(object): def",
"streamer): recorder = self.__initializeRecorder(id, trailName, dims = (1280, 720)) print(self._size) if not isinstance(postData,",
"= landmarks) k = self.showFrame(frame) if not k: recorder.release() break recorder.write(frame.astype(np.uint8)) else: jointStreamer",
"in streamer: annotations = \\ estimator.estimateInputValuesWithAnnotations(frame) pose = estimator.getHeadPose() inputValues, pPts, landmarks =",
"pos = initialPos, prefix = 'Gaze', largeScale = largeScale) return frame def showFrameWithAllInputs(self,",
"p = (tuple(pPts[start]), tuple(pPts[end])) _pPts.append(p) for start, end in _pPts: cv2.line(frame, start, end,",
"from .MappingFunctions import Boundary, StaticMapping, DynamicMapping from datetime import datetime from ... import",
"= (1280, 720)): fourcc = cv2.VideoWriter_fourcc(*'MP42') dir = Paths.MergedVideosFolder + ('%s%s' % (id,",
"frame in jointStreamer: k = self.showFrameWithAllInputs(frame, landmarks = landmarks) if not k: break",
"yRange = boundaries.getVolumeAbsRatio(outputValues) x, y = int(xRange*width), int(yRange*height) else: x, y = outputValues[:2].astype(int)",
"frame in streamer: landmarks = estimator.detectFacialLandmarks(frame) k = self.showFrameWithAllInputs(frame, landmarks = landmarks) if",
"recorder.release() break recorder.write(frame.astype(np.uint8)) else: jointStreamer = zip(*(postData + (streamer,))) for headGaze, pose, landmarks,",
"def replaySubjectVideoWithPostData(self, postData, streamer): if not isinstance(postData, tuple): jointStreamer = zip(postData, streamer) for",
"showFrame(self, frame, delay = 1): cv2.imshow('frame', frame) k = cv2.waitKey(delay) if k ==",
"color, 2, cv2.LINE_AA) return frame def addLandmarks(self, frame, landmarks, c = (255, 0,",
"DynamicMapping from datetime import datetime from ... import Paths import numpy as np",
"if not outputValues is None: frame = self.addPointer(frame, outputValues.astype(int)) return frame def showFrame(self,",
"False else: return True def __addText(self, frame, text, pos, color, largeScale = True):",
"= str(datetime.now())[:-7].replace(':', '-').replace(' ', '_') recordName = trailName + '_%s_%s_WithAllInput.avi' % (id, now)",
"if largeScale else 200 colors = ((0, 0, 255), (0, 255, 0), (255,",
"cv2.LINE_AA) if len(pPts) > 4: _pPts = [] for start, end in [(1,6),",
"dims = (1280, 720)) print(self._size) if not isinstance(postData, tuple): jointStreamer = zip(postData, streamer)",
"else: x, y = outputValues[:2].astype(int) cv2.circle(frame, (x, y), 1, (0, 0, 235), 56,",
"return frame def showFrame(self, frame, delay = 1): cv2.imshow('frame', frame) k = cv2.waitKey(delay)",
"k = self.showFrame(frame) if not k: recorder.release() break recorder.write(frame.astype(np.uint8)) else: jointStreamer = zip(*(postData",
"y), 1, (0, 0, 235), 56, cv2.LINE_AA) return frame def addAllInputs(self, frame, pPts",
"recordSubjectVideoWithPostdata(self, postData, id, trailName, streamer): recorder = self.__initializeRecorder(id, trailName, dims = (1280, 720))",
"for l in ['X', 'Y', 'Z']] g = 0 if largeScale else 200",
"self.showFrameWithAllInputs(frame, landmarks = landmarks) if not k: break return def playSubjectVideoWithAllInputs(self, estimator, streamer):",
"= self._addValues(pose[3:], frame, pos =initialPos, prefix = 'Or', largeScale = largeScale) initialPos =",
"trailName, streamer): recorder = self.__initializeRecorder(id, trailName, dims = (1280, 720)) print(self._size) if not",
"def _addMeasurements(self, inputValues, pose, frame, largeScale = True): initialPos, gap = ((20, 60),",
"now = str(datetime.now())[:-7].replace(':', '-').replace(' ', '_') recordName = trailName + '_%s_%s_WithAllInput.avi' % (id,",
"estimator for frame in streamer: annotations = \\ estimator.estimateInputValuesWithAnnotations(frame) pose = estimator.getHeadPose() inputValues,",
"= (1280, 720) def addBox(self, frame, pPts): color = (255, 255, 255) cv2.polylines(frame,",
"datetime from ... import Paths import numpy as np import cv2 import os",
"as np import cv2 import os class InputEstimationVisualizer(object): def __init__(self, sceneScale = 1,",
"None, outputValues = None, pose = None, delay = 1): frame = self.addAllInputs(frame,",
"pPts = None, landmarks = None, outputValues = None): if not landmarks is",
"w, _ = frame.shape cv2.line(frame, (0, int(h/2)), (w, int(h/2)), (0,0,0), 5) cv2.line(frame, (int(w/2),",
"[pPts], True, color, 2, cv2.LINE_AA) if len(pPts) > 4: _pPts = [] for",
"frame, pPts = None, landmarks = None, outputValues = None): if not landmarks",
"frame def showFrame(self, frame, delay = 1): cv2.imshow('frame', frame) k = cv2.waitKey(delay) if",
"largeScale = True): if largeScale: cv2.putText(frame, text, pos, cv2.FONT_HERSHEY_SIMPLEX, 2, color, thickness=8) else:",
"{:7.2f}\".format(l, float(v)) frame = self.__addText(frame, text, position, c, largeScale) position = (position[0], position[1]+(70",
"self.addAllInputs(frame, pPts, landmarks, outputValues) k = self.showFrame(frame) if not k: recorder.release() break recorder.write(frame.astype(np.uint8))",
"break return def __initializeRecorder(self, id, trailName, fps = 30, dims = (1280, 720)):",
"return else: jointStreamer = zip(*(postData + (streamer,))) for headGaze, pose, landmarks, pPts, frame",
"pPts, landmarks, outputValues) h, w, _ = frame.shape cv2.line(frame, (0, int(h/2)), (w, int(h/2)),",
"self.showFrameWithAllInputs(frame, pPts, landmarks, headGaze, pose) if not k: break return def __initializeRecorder(self, id,",
"not pPts is None: frame = self.addBox(frame, pPts.astype(int)) if not outputValues is None:",
"return True def __addText(self, frame, text, pos, color, largeScale = True): if largeScale:",
"depth) = frame.shape (xRange, yRange, _) = boundaries.getRanges() if xRange != width or",
"= boundaries.getVolumeAbsRatio(outputValues) x, y = int(xRange*width), int(yRange*height) else: x, y = outputValues[:2].astype(int) cv2.circle(frame,",
"y) in enumerate(landmarks): #if not i in [39, 42]: # continue cv2.circle(frame, (x,"
] |
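# --- Usage sketch (an assumption, not part of the original file): drive the
# overlay methods with synthetic frames and landmarks, so the drawing code can
# be checked without a camera or a trained estimator. Requires a GUI-capable
# OpenCV build, since showFrame calls cv2.imshow; press Esc or 'q' to stop.
if __name__ == '__main__':
    visualizer = InputEstimationVisualizer()
    for _ in range(60):
        frame = np.zeros((720, 1280, 3), dtype=np.uint8)        # blank 720p frame
        landmarks = np.random.randint(100, 620, size=(68, 2))   # fake 68-point face
        # showFrameWithAllInputs returns False once the user quits
        if not visualizer.showFrameWithAllInputs(frame, landmarks=landmarks):
            break
    cv2.destroyAllWindows()
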
#!/usr/bin/env python3
# @Time : 4/6/20 5:47 PM
# @Author : fangcheng.ji
# @FileName: config.py

from detectron2.config import CfgNode as CN


def add_alleria_config(cfg):
    _C = cfg

    # ---------------------------------------------------------------------------- #
    # Data Augmentation
    # ---------------------------------------------------------------------------- #
    # mosaic augmentation
    _C.DATALOADER.MOSAIC_PROB = 0.33
    # mix-up augmentation
    _C.DATALOADER.MIXUP_PROB = 0.34
    _C.DATALOADER.CUTOUT_PROB = 0.0

    # output prediction and ground truth to image
    _C.TEST.VISUAL_OUTPUT = False
    _C.TEST.AUG.NMS_TH = 0.6
    # ensemble multi model
    _C.TEST.ENSEMBLE = CN()
    _C.TEST.ENSEMBLE.ENABLED = False
    _C.TEST.ENSEMBLE.NUM = 2
    _C.TEST.ENSEMBLE.CONFIGS = ("",)
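# --- Usage sketch (an assumption, not in the original file): the added keys
# behave like any other detectron2 CfgNode entries. get_cfg() is detectron2's
# standard factory for the default config.
if __name__ == '__main__':
    from detectron2.config import get_cfg

    cfg = get_cfg()                     # detectron2 defaults
    add_alleria_config(cfg)             # adds DATALOADER.*_PROB and TEST.ENSEMBLE.*
    cfg.DATALOADER.MOSAIC_PROB = 0.5    # override like any built-in option
    print(cfg.TEST.ENSEMBLE.ENABLED, cfg.DATALOADER.MOSAIC_PROB)
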
import json
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
# note: Imputer was removed in scikit-learn 0.22; this module targets older releases
from sklearn.preprocessing import Imputer, StandardScaler

import DataSource
import os.path


class NYK(DataSource.DataSource):

    def __init__(self, app, dsrc_name='', dsrc_type='csv', dsrc_path='data/', file_name='',
                 header_rows=None, date_cols=None, skip_rows=None,
                 lat1=None, long1=None, lat2=None, long2=None):
        DataSource.DataSource.__init__(self, app, dsrc_name)
        self.dsrc_type = dsrc_type
        self.dsrc_path = dsrc_path
        self.file_name = file_name
        self.header_rows = header_rows
        self.date_cols = date_cols
        self.skip_rows = skip_rows
        self.lat1 = lat1
        self.long1 = long1
        self.lat2 = lat2
        self.long2 = long2
        self.read_prepare_data()
        self.init_dsrc()

    """These methods are fine-tuned for the current data sets. I need to
    generalize them once I know more about different types of data coming in"""

    @classmethod
    def clean(cls, df, name):
        """Find all empty space or all NaN columns and drops them from the DataFrame"""
        df.replace(r'\s+', np.nan, regex=True, inplace=True)
        df.replace(r'-', np.nan, regex=True, inplace=True)
        df.dropna(axis=1, how='all', inplace=True)
        df.columns = [str(x) for x in df.columns]
        df.reset_index(level=[0], inplace=True)
        df.rename(columns={'index': 'ind'}, inplace=True)
        """This is to find coordinate columns etc. manually, because we don't
        know anything about the structure of our data!"""
        # df.to_csv('data/'+name+'_clean.csv')
        return df

    @classmethod
    def scale_impute(cls, df, method):
        """Find float columns, impute their NaN values with 'method', and then
        min-max scale the column/feature"""
        fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1)
        df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform(
            df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])]
        )
        scaler = StandardScaler()
        df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = scaler.fit_transform(
            df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])]
        )
        return df

    @classmethod
    def convert_coordinate(cls, df, col_in, col_out):
        """Convert coordinates of the format [d]ddmm.mmm to [dd]d.ddd"""
        ##FIXME! This is assuming all coordinates are E and N
        df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int)
        return df

    @classmethod
    def wgs84_to_web_mercator(cls, df, lon, lat):
        """Convert decimal longitude/latitude to Web Mercator format"""
        k = 6378137
        df['wm%s' % lon] = df[lon] * (k * np.pi/180.0)
        df['wm%s' % lat] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k
        return df

    def read_prepare_data(self):
        """Use all data tools above to deliver the final cleaned DataFrame"""
        self.data = self.dsrc_types[self.dsrc_type](
            os.path.join(self.dsrc_path, self.file_name),
            header = self.header_rows,
            parse_dates = self.date_cols,
            skiprows = self.skip_rows,
            error_bad_lines = False,  # renamed to on_bad_lines in pandas >= 1.3
            low_memory = False
        )
        self.data['timestamp2'] = pd.to_datetime(self.data[0])
        self.data['timestamp1'] = pd.to_datetime(self.data[1])
        self.clean(self.data, self.dsrc_name)
        self.convert_coordinate(self.data, str(self.lat1), 'lat1')
        self.convert_coordinate(self.data, str(self.long1), 'long1')
        self.convert_coordinate(self.data, str(self.lat2), 'lat2')
        self.convert_coordinate(self.data, str(self.long2), 'long2')
        self.scale_impute(self.data, 'mean')
        self.wgs84_to_web_mercator(self.data, 'long1', 'lat1')
        self.wgs84_to_web_mercator(self.data, 'long2', 'lat2')
        self.data['timestamp_date'] = self.data['timestamp1'].dt.strftime('%Y-%m-%d')


DataSource.DataSource.types['NYK'] = NYK
"because we don't know anything about the structure of our data!\"\"\" # df.to_csv('data/'+name+'_clean.csv')",
"pandas as pd pd.options.mode.chained_assignment = None from sklearn.preprocessing import Imputer, StandardScaler import DataSource",
"dsrc_type self.dsrc_path = dsrc_path self.file_name = file_name self.header_rows = header_rows self.date_cols = date_cols",
"NaN values with 'method', and then min-max scale the column/feature\"\"\" fill_NaN = Imputer(missing_values=np.nan,",
"df, lon, lat): \"\"\"Convert decimal longitude/latitude to Web Mercator format\"\"\" k = 6378137",
"dsrc_type='csv', dsrc_path='data/', file_name='', header_rows=None, date_cols=None, skip_rows=None, lat1=None, long1=None, lat2=None, long2=None): DataSource.DataSource.__init__(self, app, dsrc_name)",
"df.columns = [str(x) for x in df.columns] df.reset_index(level=[0], inplace=True) df.rename(columns={'index': 'ind'}, inplace=True) \"\"\"This",
"all empty space or all NaN columns and drops them from the DataFrame\"\"\"",
"Mercator format\"\"\" k = 6378137 df['wm%s'%lon] = df[lon] * (k * np.pi/180.0) df['wm%s'%lat]",
"def read_prepare_data(self): \"\"\"Use all data tools above to deliver the final cleaned DataFrame\"\"\"",
"= lat2 self.long2 = long2 self.read_prepare_data() self.init_dsrc() \"\"\"These methods are fine-tuned for the",
"Imputer, StandardScaler import DataSource import os.path class NYK(DataSource.DataSource): def __init__(self, app, dsrc_name='', dsrc_type='csv',",
"= skip_rows self.lat1 = lat1 self.long1 = long1 self.lat2 = lat2 self.long2 =",
"(df[col_in]/100).astype(int) return df @classmethod def wgs84_to_web_mercator(cls, df, lon, lat): \"\"\"Convert decimal longitude/latitude to",
"as pd pd.options.mode.chained_assignment = None from sklearn.preprocessing import Imputer, StandardScaler import DataSource import",
"= StandardScaler() df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = scaler.fit_transform( df[df.loc[:, df.dtypes",
"df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) scaler = StandardScaler() df[df.loc[:, df.dtypes ==",
"coordinates of the format [d]ddmm.mmm to [dd]d.ddd\"\"\" ##FIXME! This is assuming all coordinates",
"dsrc_path='data/', file_name='', header_rows=None, date_cols=None, skip_rows=None, lat1=None, long1=None, lat2=None, long2=None): DataSource.DataSource.__init__(self, app, dsrc_name) self.dsrc_type",
"df['wm%s'%lat] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k return df def read_prepare_data(self):",
"drops them from the DataFrame\"\"\" df.replace(r'\\s+', np.nan, regex=True, inplace=True) df.replace(r'-', np.nan, regex=True, inplace=True)",
"scale_impute(cls, df, method): \"\"\"Find float columns, impute their NaN values with 'method', and",
"from sklearn.preprocessing import Imputer, StandardScaler import DataSource import os.path class NYK(DataSource.DataSource): def __init__(self,",
"self.header_rows, parse_dates = self.date_cols, skiprows = self.skip_rows, error_bad_lines = False, low_memory = False",
"* np.pi/360.0)) * k return df def read_prepare_data(self): \"\"\"Use all data tools above",
"of our data!\"\"\" # df.to_csv('data/'+name+'_clean.csv') return df @classmethod def scale_impute(cls, df, method): \"\"\"Find",
"== 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = scaler.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2',",
"pd.options.mode.chained_assignment = None from sklearn.preprocessing import Imputer, StandardScaler import DataSource import os.path class",
"long1=None, lat2=None, long2=None): DataSource.DataSource.__init__(self, app, dsrc_name) self.dsrc_type = dsrc_type self.dsrc_path = dsrc_path self.file_name",
"more about different types of data coming in\"\"\" @classmethod def clean(cls, df, name):",
"self.init_dsrc() \"\"\"These methods are fine-tuned for the current data sets. I need to",
"+ (df[col_in]/100).astype(int) return df @classmethod def wgs84_to_web_mercator(cls, df, lon, lat): \"\"\"Convert decimal longitude/latitude",
"all NaN columns and drops them from the DataFrame\"\"\" df.replace(r'\\s+', np.nan, regex=True, inplace=True)",
"= pd.to_datetime(self.data[0]) self.data['timestamp1'] = pd.to_datetime(self.data[1]) self.clean(self.data, self.dsrc_name) self.convert_coordinate(self.data, str(self.lat1), 'lat1') self.convert_coordinate(self.data, str(self.long1), 'long1')",
"find coordinate columns etc. manually, because we don't know anything about the structure",
") return df @classmethod def convert_coordinate(cls, df, col_in, col_out): \"\"\"Convert coordinates of the",
"long1 self.lat2 = lat2 self.long2 = long2 self.read_prepare_data() self.init_dsrc() \"\"\"These methods are fine-tuned",
"df.rename(columns={'index': 'ind'}, inplace=True) \"\"\"This is to find coordinate columns etc. manually, because we",
"= pd.to_datetime(self.data[1]) self.clean(self.data, self.dsrc_name) self.convert_coordinate(self.data, str(self.lat1), 'lat1') self.convert_coordinate(self.data, str(self.long1), 'long1') self.convert_coordinate(self.data, str(self.lat2), 'lat2')",
"+ df[lat]) * np.pi/360.0)) * k return df def read_prepare_data(self): \"\"\"Use all data",
"parse_dates = self.date_cols, skiprows = self.skip_rows, error_bad_lines = False, low_memory = False )",
"'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) scaler = StandardScaler() df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1',",
"[dd]d.ddd\"\"\" ##FIXME! This is assuming all coordinates are E and N df[col_out] =",
"\"\"\"Find all empty space or all NaN columns and drops them from the",
"self.dsrc_type = dsrc_type self.dsrc_path = dsrc_path self.file_name = file_name self.header_rows = header_rows self.date_cols",
"with 'method', and then min-max scale the column/feature\"\"\" fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1)",
"skiprows = self.skip_rows, error_bad_lines = False, low_memory = False ) self.data['timestamp2'] = pd.to_datetime(self.data[0])",
"df, col_in, col_out): \"\"\"Convert coordinates of the format [d]ddmm.mmm to [dd]d.ddd\"\"\" ##FIXME! This",
"don't know anything about the structure of our data!\"\"\" # df.to_csv('data/'+name+'_clean.csv') return df",
"* (k * np.pi/180.0) df['wm%s'%lat] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k",
"self.convert_coordinate(self.data, str(self.lat2), 'lat2') self.convert_coordinate(self.data, str(self.long2), 'long2') self.scale_impute(self.data, 'mean') self.wgs84_to_web_mercator(self.data, 'long1', 'lat1') self.wgs84_to_web_mercator(self.data, 'long2',",
"= lat1 self.long1 = long1 self.lat2 = lat2 self.long2 = long2 self.read_prepare_data() self.init_dsrc()",
"np.pi/180.0) df['wm%s'%lat] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k return df def",
"we don't know anything about the structure of our data!\"\"\" # df.to_csv('data/'+name+'_clean.csv') return",
"return df @classmethod def scale_impute(cls, df, method): \"\"\"Find float columns, impute their NaN",
"'method', and then min-max scale the column/feature\"\"\" fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1) df[df.loc[:,",
"import os.path class NYK(DataSource.DataSource): def __init__(self, app, dsrc_name='', dsrc_type='csv', dsrc_path='data/', file_name='', header_rows=None, date_cols=None,",
"@classmethod def convert_coordinate(cls, df, col_in, col_out): \"\"\"Convert coordinates of the format [d]ddmm.mmm to",
"= file_name self.header_rows = header_rows self.date_cols = date_cols self.skip_rows = skip_rows self.lat1 =",
"str(self.long1), 'long1') self.convert_coordinate(self.data, str(self.lat2), 'lat2') self.convert_coordinate(self.data, str(self.long2), 'long2') self.scale_impute(self.data, 'mean') self.wgs84_to_web_mercator(self.data, 'long1', 'lat1')",
"'long2'])] = scaler.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) return df",
"import numpy as np import pandas as pd pd.options.mode.chained_assignment = None from sklearn.preprocessing",
"sklearn.preprocessing import Imputer, StandardScaler import DataSource import os.path class NYK(DataSource.DataSource): def __init__(self, app,",
"DataSource.DataSource.__init__(self, app, dsrc_name) self.dsrc_type = dsrc_type self.dsrc_path = dsrc_path self.file_name = file_name self.header_rows",
"return df def read_prepare_data(self): \"\"\"Use all data tools above to deliver the final",
"= Imputer(missing_values=np.nan, strategy=method, axis=1) df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform(",
"once I know more about different types of data coming in\"\"\" @classmethod def",
"methods are fine-tuned for the current data sets. I need to generalize them",
"\"\"\"These methods are fine-tuned for the current data sets. I need to generalize",
"regex=True, inplace=True) df.replace(r'-', np.nan, regex=True, inplace=True) df.dropna(axis=1, how='all', inplace=True) df.columns = [str(x) for",
"the column/feature\"\"\" fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1) df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2',",
"df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) return df @classmethod def convert_coordinate(cls, df,",
"long2 self.read_prepare_data() self.init_dsrc() \"\"\"These methods are fine-tuned for the current data sets. I",
"df def read_prepare_data(self): \"\"\"Use all data tools above to deliver the final cleaned",
"* k return df def read_prepare_data(self): \"\"\"Use all data tools above to deliver",
"values with 'method', and then min-max scale the column/feature\"\"\" fill_NaN = Imputer(missing_values=np.nan, strategy=method,",
"coordinates are E and N df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int) return",
"NaN columns and drops them from the DataFrame\"\"\" df.replace(r'\\s+', np.nan, regex=True, inplace=True) df.replace(r'-',",
"generalize them once I know more about different types of data coming in\"\"\"",
"numpy as np import pandas as pd pd.options.mode.chained_assignment = None from sklearn.preprocessing import",
"= header_rows self.date_cols = date_cols self.skip_rows = skip_rows self.lat1 = lat1 self.long1 =",
"current data sets. I need to generalize them once I know more about",
"df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1',",
"pd pd.options.mode.chained_assignment = None from sklearn.preprocessing import Imputer, StandardScaler import DataSource import os.path",
"columns etc. manually, because we don't know anything about the structure of our",
"E and N df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int) return df @classmethod",
"I need to generalize them once I know more about different types of",
"DataFrame\"\"\" df.replace(r'\\s+', np.nan, regex=True, inplace=True) df.replace(r'-', np.nan, regex=True, inplace=True) df.dropna(axis=1, how='all', inplace=True) df.columns",
"convert_coordinate(cls, df, col_in, col_out): \"\"\"Convert coordinates of the format [d]ddmm.mmm to [dd]d.ddd\"\"\" ##FIXME!",
"k = 6378137 df['wm%s'%lon] = df[lon] * (k * np.pi/180.0) df['wm%s'%lat] = np.log(np.tan((90",
"axis=1) df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform( df[df.loc[:, df.dtypes ==",
"etc. manually, because we don't know anything about the structure of our data!\"\"\"",
"= None from sklearn.preprocessing import Imputer, StandardScaler import DataSource import os.path class NYK(DataSource.DataSource):",
"above to deliver the final cleaned DataFrame\"\"\" self.data = self.dsrc_types[self.dsrc_type]( os.path.join(self.dsrc_path, self.file_name), header",
"of data coming in\"\"\" @classmethod def clean(cls, df, name): \"\"\"Find all empty space",
"self.file_name = file_name self.header_rows = header_rows self.date_cols = date_cols self.skip_rows = skip_rows self.lat1",
"= scaler.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) return df @classmethod",
"'lat2', 'long2'])] = fill_NaN.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) scaler",
"'long1', 'lat2', 'long2'])] = fill_NaN.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] )",
"skip_rows=None, lat1=None, long1=None, lat2=None, long2=None): DataSource.DataSource.__init__(self, app, dsrc_name) self.dsrc_type = dsrc_type self.dsrc_path =",
"'long1', 'lat2', 'long2'])] ) return df @classmethod def convert_coordinate(cls, df, col_in, col_out): \"\"\"Convert",
"= self.header_rows, parse_dates = self.date_cols, skiprows = self.skip_rows, error_bad_lines = False, low_memory =",
"dsrc_path self.file_name = file_name self.header_rows = header_rows self.date_cols = date_cols self.skip_rows = skip_rows",
"df.replace(r'-', np.nan, regex=True, inplace=True) df.dropna(axis=1, how='all', inplace=True) df.columns = [str(x) for x in",
"\"\"\"Use all data tools above to deliver the final cleaned DataFrame\"\"\" self.data =",
"= False, low_memory = False ) self.data['timestamp2'] = pd.to_datetime(self.data[0]) self.data['timestamp1'] = pd.to_datetime(self.data[1]) self.clean(self.data,",
"or all NaN columns and drops them from the DataFrame\"\"\" df.replace(r'\\s+', np.nan, regex=True,",
"as np import pandas as pd pd.options.mode.chained_assignment = None from sklearn.preprocessing import Imputer,",
"(df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int) return df @classmethod def wgs84_to_web_mercator(cls, df, lon, lat): \"\"\"Convert decimal",
"the format [d]ddmm.mmm to [dd]d.ddd\"\"\" ##FIXME! This is assuming all coordinates are E",
"col_in, col_out): \"\"\"Convert coordinates of the format [d]ddmm.mmm to [dd]d.ddd\"\"\" ##FIXME! This is",
"df.reset_index(level=[0], inplace=True) df.rename(columns={'index': 'ind'}, inplace=True) \"\"\"This is to find coordinate columns etc. manually,",
"lat): \"\"\"Convert decimal longitude/latitude to Web Mercator format\"\"\" k = 6378137 df['wm%s'%lon] =",
") scaler = StandardScaler() df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] = scaler.fit_transform(",
"df @classmethod def scale_impute(cls, df, method): \"\"\"Find float columns, impute their NaN values",
"header = self.header_rows, parse_dates = self.date_cols, skiprows = self.skip_rows, error_bad_lines = False, low_memory",
"df.to_csv('data/'+name+'_clean.csv') return df @classmethod def scale_impute(cls, df, method): \"\"\"Find float columns, impute their",
"about different types of data coming in\"\"\" @classmethod def clean(cls, df, name): \"\"\"Find",
"np.pi/360.0)) * k return df def read_prepare_data(self): \"\"\"Use all data tools above to",
"return df @classmethod def convert_coordinate(cls, df, col_in, col_out): \"\"\"Convert coordinates of the format",
"'long2'])] ) scaler = StandardScaler() df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] =",
"N df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int) return df @classmethod def wgs84_to_web_mercator(cls,",
"self.file_name), header = self.header_rows, parse_dates = self.date_cols, skiprows = self.skip_rows, error_bad_lines = False,",
"np.nan, regex=True, inplace=True) df.dropna(axis=1, how='all', inplace=True) df.columns = [str(x) for x in df.columns]",
"import json import numpy as np import pandas as pd pd.options.mode.chained_assignment = None",
"class NYK(DataSource.DataSource): def __init__(self, app, dsrc_name='', dsrc_type='csv', dsrc_path='data/', file_name='', header_rows=None, date_cols=None, skip_rows=None, lat1=None,",
"and N df[col_out] = (df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int) return df @classmethod def",
"in df.columns] df.reset_index(level=[0], inplace=True) df.rename(columns={'index': 'ind'}, inplace=True) \"\"\"This is to find coordinate columns",
"scaler.fit_transform( df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] ) return df @classmethod def",
"fill_NaN = Imputer(missing_values=np.nan, strategy=method, axis=1) df[df.loc[:, df.dtypes == 'float64'].columns.difference(['lat1', 'long1', 'lat2', 'long2'])] =",
"format\"\"\" k = 6378137 df['wm%s'%lon] = df[lon] * (k * np.pi/180.0) df['wm%s'%lat] =",
"header_rows self.date_cols = date_cols self.skip_rows = skip_rows self.lat1 = lat1 self.long1 = long1",
"columns and drops them from the DataFrame\"\"\" df.replace(r'\\s+', np.nan, regex=True, inplace=True) df.replace(r'-', np.nan,",
"(df[col_in]/100 - (df[col_in]/100).astype(int))*100.*0.0166666667 + (df[col_in]/100).astype(int) return df @classmethod def wgs84_to_web_mercator(cls, df, lon, lat):",
"df.replace(r'\\s+', np.nan, regex=True, inplace=True) df.replace(r'-', np.nan, regex=True, inplace=True) df.dropna(axis=1, how='all', inplace=True) df.columns =",
"header_rows=None, date_cols=None, skip_rows=None, lat1=None, long1=None, lat2=None, long2=None): DataSource.DataSource.__init__(self, app, dsrc_name) self.dsrc_type = dsrc_type"
] |
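
# A quick standalone sketch (illustration only, not part of the class above) of what
# convert_coordinate computes: [d]ddmm.mmm values become decimal degrees, since the
# magic constant 0.0166666667 is just 1/60. For example 6049.5 means 60° 49.5'.
import pandas as pd

example = pd.DataFrame({'raw': [6049.5, 12130.2]})
deg = (example['raw'] / 100).astype(int)         # integer degrees: 60, 121
minutes = (example['raw'] / 100 - deg) * 100     # leftover minutes: 49.5, 30.2
example['decimal'] = deg + minutes / 60          # 60.825, ~121.5033
print(example)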
#!/usr/bin/env python3
# Created on 05/07/2018
# @author: <NAME>
# @license: MIT-license
# Purpose: example of multiple agent flocking behavior
# Explanation:
import pygame, sys, random, math

pygame.init()
stopped = False
window_height = 800
window_width = 600
black = (0, 0, 0)
white = (255, 255, 255)


class agent:
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.velocityX = 10
        self.velocityY = 10


while not stopped:
    ev = pygame.event.get()
    for event in ev:
        if event.type == pygame.MOUSEBUTTONDOWN:
            pos = pygame.mouse.get_pos()
        if event.type == pygame.QUIT:
            stopped = True  # was 'running = False', a flag the loop never checks
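
# A sketch (assumed continuation, not in the original fragment) of how the event
# loop above could use the captured click position: spawn an agent wherever the
# user clicks. It reuses the `agent` class and the pygame setup defined above.
agents = []
while not stopped:
    for event in pygame.event.get():
        if event.type == pygame.MOUSEBUTTONDOWN:
            x, y = pygame.mouse.get_pos()
            agents.append(agent(x, y))  # one new agent per click
        elif event.type == pygame.QUIT:
            stopped = True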
import numpy as np  # needed for the np.float64 / np.ndarray handling below
import torch
from .base_wrapper import BaseWrapper
from torch.autograd.functional import hvp, vhp, hessian
from typing import List, Tuple, Dict, Union, Callable
from torch import nn, Tensor


class TorchWrapper(BaseWrapper):
    def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'):
        self.func = func
        # Not very clean...
        if 'device' in dir(func):
            self.device = func.device
        else:
            self.device = torch.device(device)
        if precision == 'float32':
            self.precision = torch.float32
        elif precision == 'float64':
            self.precision = torch.float64
        else:
            raise ValueError
        self.hvp_func = hvp if hvp_type == 'hvp' else vhp

    def get_value_and_grad(self, input_var):
        assert 'shapes' in dir(self), 'You must first call get input to define the tensors shapes.'
        input_var_ = self._unconcat(torch.tensor(
            input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes)
        loss = self._eval_func(input_var_)
        input_var_grad = input_var_.values() if isinstance(input_var_, dict) else input_var_
        grads = torch.autograd.grad(loss, input_var_grad)
        if isinstance(input_var_, dict):
            grads = {k: v for k, v in zip(input_var_.keys(), grads)}
        return [loss.cpu().detach().numpy().astype(np.float64),
                self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)]

    def get_hvp(self, input_var, vector):
        assert 'shapes' in dir(self), 'You must first call get input to define the tensors shapes.'
        input_var_ = self._unconcat(torch.tensor(
            input_var, dtype=self.precision, device=self.device), self.shapes)
        vector_ = self._unconcat(torch.tensor(
            vector, dtype=self.precision, device=self.device), self.shapes)
        if isinstance(input_var_, dict):
            input_var_ = tuple(input_var_.values())
        if isinstance(vector_, dict):
            vector_ = tuple(vector_.values())
        if isinstance(input_var_, list):
            input_var_ = tuple(input_var_)
        if isinstance(vector_, list):
            vector_ = tuple(vector_)
        loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_)
        return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64)

    def get_hess(self, input_var):
        assert 'shapes' in dir(self), 'You must first call get input to define the tensors shapes.'
        input_var_ = torch.tensor(input_var, dtype=self.precision, device=self.device)

        def func(inp):
            return self._eval_func(self._unconcat(inp, self.shapes))

        hess = hessian(func, input_var_, vectorize=False)
        return hess.cpu().detach().numpy().astype(np.float64)

    def get_ctr_jac(self, input_var):
        assert 'shapes' in dir(self), 'You must first call get input to define the tensors shapes.'
        input_var_ = self._unconcat(torch.tensor(
            input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes)
        ctr_val = self._eval_ctr_func(input_var_)
        input_var_grad = input_var_.values() if isinstance(input_var_, dict) else input_var_
        grads = torch.autograd.grad(ctr_val, input_var_grad)
        # NOTE: torch.autograd.grad returns a tuple; .cpu() here assumes a
        # single-tensor result
        return grads.cpu().detach().numpy().astype(np.float64)

    def _reshape(self, t, sh):
        if torch.is_tensor(t):
            return t.reshape(sh)
        elif isinstance(t, np.ndarray):
            return np.reshape(t, sh)
        else:
            raise NotImplementedError

    def _tconcat(self, t_list, dim=0):
        if torch.is_tensor(t_list[0]):
            return torch.cat(t_list, dim)
        elif isinstance(t_list[0], np.ndarray):
            return np.concatenate(t_list, dim)
        else:
            raise NotImplementedError

    def _gather(self, t, i, j):
        if isinstance(t, np.ndarray) or torch.is_tensor(t):
            return t[i:j]
        else:
            raise NotImplementedError


def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None):
    """
    A factory to create a function of the torch parameter model.

    :param model: torch model
    :type model: torch.nn.Module
    :param loss: a function with signature loss_value = loss(pred_y, true_y).
    :type loss: function
    :param train_x: dataset used as input of the model
    :type train_x: np.ndarray
    :param train_y: dataset used as ground truth input of the loss
    :type train_y: np.ndarray
    :return: (function of the parameters, list of parameters, names of parameters)
    :rtype: tuple
    """
    # named_params = {k: var.cpu().detach().numpy() for k, var in model.named_parameters()}
    params, names = extract_weights(model)
    device = params[0].device
    prec_ = torch.float32 if precision == 'float32' else torch.float64
    if isinstance(train_x, np.ndarray):
        train_x = torch.tensor(train_x, dtype=prec_, device=device)
    if isinstance(train_y, np.ndarray):
        train_y = torch.tensor(train_y, dtype=prec_, device=device)

    def func(*new_params):
        load_weights(model, {k: v for k, v in zip(names, new_params)})
        out = apply_func(model, train_x)
        return loss(out, train_y)

    func.device = device
    return func, [p.cpu().detach().numpy() for p in params], names


def apply_func(func, input_):
    if isinstance(input_, dict):
        return func(**input_)
    elif isinstance(input_, list) or isinstance(input_, tuple):
        return func(*input_)
    else:
        return func(input_)


# Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py
# Utilities to make nn.Module "functional"
# In particular the goal is to be able to provide a function that takes as input
# the parameters and evaluate the nn.Module using fixed inputs.
def _del_nested_attr(obj: nn.Module, names: List[str]) -> None:
    """
    Deletes the attribute specified by the given list of names.
    For example, to delete the attribute obj.conv.weight, use
    _del_nested_attr(obj, ['conv', 'weight'])
    """
    if len(names) == 1:
        delattr(obj, names[0])
    else:
        _del_nested_attr(getattr(obj, names[0]), names[1:])


def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None:
    """
    Set the attribute specified by the given list of names to value.
    For example, to set the attribute obj.conv.weight, use
    _set_nested_attr(obj, ['conv', 'weight'], value)
    """
    if len(names) == 1:
        setattr(obj, names[0], value)
    else:
        _set_nested_attr(getattr(obj, names[0]), names[1:], value)


def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]:
    """
    This function removes all the Parameters from the model and
    returns them as a tuple as well as their original attribute names.
    The weights must be re-loaded with `load_weights` before the model
    can be used again.
    Note that this function modifies the model in place and after this
    call, mod.parameters() will be empty.
    """
    orig_params = [p for p in mod.parameters() if p.requires_grad]
    # Remove all the parameters in the model
    names = []
    for name, p in list(mod.named_parameters()):
        if p.requires_grad:
            _del_nested_attr(mod, name.split("."))
            names.append(name)
    # Make params regular Tensors instead of nn.Parameter
    params = tuple(p.detach().requires_grad_() for p in orig_params)
    return params, names


def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None:
    """
    Reload a set of weights so that `mod` can be used again to perform a forward pass.
    Note that the `params` are regular Tensors (that can have history) and so are left
    as Tensors. This means that mod.parameters() will still be empty after this call.
    """
    for name, p in params.items():
        # Loop body truncated in the source; re-attaching via _set_nested_attr
        # matches the upstream file this section is adapted from
        _set_nested_attr(mod, name.split("."), p)
"model :type model: torch.nn.Modle] :param loss: a function with signature loss_value = loss(pred_y,",
"device=device) if isinstance(train_y, np.ndarray): train_y = torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model, {k:",
"will be empty. \"\"\" orig_params = [p for p in mod.parameters() if p.requires_grad]",
"`params` are regular Tensors (that can have history) and so are left as",
"np.ndarray): return np.reshape(t, sh) else: raise NotImplementedError def _tconcat(self, t_list, dim=0): if torch.is_tensor(t_list[0]):",
"in dir( self), 'You must first call get input to define the tensors",
"This function removes all the Parameters from the model and return them as",
"class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'): self.func = func # Not",
"return hess.cpu().detach().numpy().astype(np.float64) def get_ctr_jac(self, input_var): assert 'shapes' in dir( self), 'You must first",
"func(**input_) elif isinstance(input_, list) or isinstance(input_, tuple): return func(*input_) else: return func(input_) #",
"in the model names = [] for name, p in list(mod.named_parameters()): if p.requires_grad:",
"input_var_grad) if isinstance(input_var_, dict): grads = {k: v for k, v in zip(input_var_.keys(),",
"return them as a tuple as well as their original attribute names. The",
"call get input to define the tensors shapes.' input_var_ = torch.tensor( input_var, dtype=self.precision,",
"the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight'], value) \"\"\" if len(names) == 1:",
"empty. \"\"\" orig_params = [p for p in mod.parameters() if p.requires_grad] # Remove",
"func.device else: self.device = torch.device(device) if precision == 'float32': self.precision = torch.float32 elif",
"self.hvp_func(self.func, input_var_, v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self, input_var): assert 'shapes' in dir( self),",
"that this function modifies the model in place and after this call, mod.parameters()",
":type train_y: np.ndarray :return: (function of the parameters, list of parameters, names of",
"parameters, names of parameters) :rtype: tuple \"\"\" # named_params = {k: var.cpu().detach().numpy() for",
"used again to perform a forward pass. Note that the `params` are regular",
"'hvp' else vhp def get_value_and_grad(self, input_var): assert 'shapes' in dir( self), 'You must",
"to create a function of the torch parameter model. :param model: torch model",
"model and return them as a tuple as well as their original attribute",
":type train_x: np.ndarray :param train_y: dataset used as ground truth input of the",
"for p in mod.parameters() if p.requires_grad] # Remove all the parameters in the",
"def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]: \"\"\" This function removes all the",
"use _del_nested_attr(obj, ['conv', 'weight']) \"\"\" if len(names) == 1: delattr(obj, names[0]) else: _del_nested_attr(getattr(obj,",
":rtype: tuple \"\"\" # named_params = {k: var.cpu().detach().numpy() for k, var in model.named_parameters()}",
"train_x: dataset used as input of the model :type train_x: np.ndarray :param train_y:",
"pass. Note that the `params` are regular Tensors (that can have history) and",
"the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor(",
"device = params[0].device prec_ = torch.float32 if precision == 'float32' else torch.float64 if",
"[] for name, p in list(mod.named_parameters()): if p.requires_grad: _del_nested_attr(mod, name.split(\".\")) names.append(name) # Make",
"be used again to perform a forward pass. Note that the `params` are",
"still be empty after this call. \"\"\" for name, p in params.items(): _set_nested_attr(mod,",
"clean... if 'device' in dir(func): self.device = func.device else: self.device = torch.device(device) if",
"perform a forward pass. Note that the `params` are regular Tensors (that can",
"tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss = self._eval_func(input_var_)",
"name, p in list(mod.named_parameters()): if p.requires_grad: _del_nested_attr(mod, name.split(\".\")) names.append(name) # Make params regular",
"the `params` are regular Tensors (that can have history) and so are left",
"return np.concatenate(t_list, dim) else: raise NotImplementedError def _gather(self, t, i, j): if isinstance(t,",
"names[0], value) else: _set_nested_attr(getattr(obj, names[0]), names[1:], value) def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...],",
"the tensors shapes.' input_var_ = torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp,",
"delattr(obj, names[0]) else: _del_nested_attr(getattr(obj, names[0]), names[1:]) def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor)",
"numpy as np import torch from .base_wrapper import BaseWrapper from torch.autograd.functional import hvp,",
"p in mod.parameters() if p.requires_grad] # Remove all the parameters in the model",
"in mod.parameters() if p.requires_grad] # Remove all the parameters in the model names",
"names = extract_weights(model) device = params[0].device prec_ = torch.float32 if precision == 'float32'",
"# Remove all the parameters in the model names = [] for name,",
"the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss =",
"tuple(input_var_.values()) if isinstance(vector_, dict): vector_ = tuple(vector_.values()) if isinstance(input_var_, list): input_var_ = tuple(input_var_)",
"input_var_ grads = torch.autograd.grad(loss, input_var_grad) if isinstance(input_var_, dict): grads = {k: v for",
"List[str]]: \"\"\" This function removes all the Parameters from the model and return",
"= params[0].device prec_ = torch.float32 if precision == 'float32' else torch.float64 if isinstance(train_x,",
"isinstance(input_var_, list): input_var_ = tuple(input_var_) if isinstance(vector_, list): vector_ = tuple(vector_) loss, vhp_res",
"get_hvp(self, input_var, vector): assert 'shapes' in dir( self), 'You must first call get",
"= self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor( vector, dtype=self.precision, device=self.device), self.shapes)",
"model names = [] for name, p in list(mod.named_parameters()): if p.requires_grad: _del_nested_attr(mod, name.split(\".\"))",
"= tuple(p.detach().requires_grad_() for p in orig_params) return params, names def load_weights(mod: nn.Module, params:",
"Tensors. This means that mod.parameters() will still be empty after this call. \"\"\"",
"of the model :type train_x: np.ndarray :param train_y: dataset used as ground truth",
"raise NotImplementedError def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None): \"\"\" A factory to",
"to set the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight'], value) \"\"\" if len(names)",
"hvp, vhp, hessian from typing import List, Tuple, Dict, Union, Callable from torch",
"if torch.is_tensor(t_list[0]): return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim) else: raise",
"func(*input_) else: return func(input_) # Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py # Utilities to make nn.Module",
"# Make params regular Tensors instead of nn.Parameter params = tuple(p.detach().requires_grad_() for p",
"= torch.autograd.grad(loss, input_var_grad) if isinstance(input_var_, dict): grads = {k: v for k, v",
"nn.Module, params: Dict[str, Tensor]) -> None: \"\"\" Reload a set of weights so",
"a set of weights so that `mod` can be used again to perform",
"if torch.is_tensor(t): return t.reshape(sh) elif isinstance(t, np.ndarray): return np.reshape(t, sh) else: raise NotImplementedError",
"requires_grad=True, device=self.device), self.shapes) loss = self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict)",
"if precision == 'float32': self.precision = torch.float32 elif precision == 'float64': self.precision =",
"and evaluate the nn.Module using fixed inputs. def _del_nested_attr(obj: nn.Module, names: List[str]) ->",
"of the parameters, list of parameters, names of parameters) :rtype: tuple \"\"\" #",
"= apply_func(model, train_x) return loss(out, train_y) func.device = device return func, [p.cpu().detach().numpy() for",
"history) and so are left as Tensors. This means that mod.parameters() will still",
"means that mod.parameters() will still be empty after this call. \"\"\" for name,",
"self.precision = torch.float64 else: raise ValueError self.hvp_func = hvp if hvp_type == 'hvp'",
":type model: torch.nn.Modle] :param loss: a function with signature loss_value = loss(pred_y, true_y).",
"specified by the given list of names. For example, to delete the attribute",
"train_x, train_y, precision='float32', optimized_vars=None): \"\"\" A factory to create a function of the",
"_tconcat(self, t_list, dim=0): if torch.is_tensor(t_list[0]): return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list,",
"import hvp, vhp, hessian from typing import List, Tuple, Dict, Union, Callable from",
"attribute specified by the given list of names. For example, to delete the",
"'weight']) \"\"\" if len(names) == 1: delattr(obj, names[0]) else: _del_nested_attr(getattr(obj, names[0]), names[1:]) def",
"input_var_ grads = torch.autograd.grad(ctr_val, input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh): if torch.is_tensor(t):",
"= func # Not very clean... if 'device' in dir(func): self.device = func.device",
"function that takes as input # the parameters and evaluate the nn.Module using",
"as input of the model :type train_x: np.ndarray :param train_y: dataset used as",
"list of names to value. For example, to set the attribute obj.conv.weight, use",
"dir( self), 'You must first call get input to define the tensors shapes.'",
"must first call get input to define the tensors shapes.' input_var_ = self._unconcat(torch.tensor(",
"np.ndarray): train_y = torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model, {k: v for k,",
"== 1: setattr(obj, names[0], value) else: _set_nested_attr(getattr(obj, names[0]), names[1:], value) def extract_weights(mod: nn.Module)",
"if hvp_type == 'hvp' else vhp def get_value_and_grad(self, input_var): assert 'shapes' in dir(",
"func, [p.cpu().detach().numpy() for p in params], names def apply_func(func, input_): if isinstance(input_, dict):",
"input of the loss :type train_y: np.ndarray :return: (function of the parameters, list",
"import numpy as np import torch from .base_wrapper import BaseWrapper from torch.autograd.functional import",
"import BaseWrapper from torch.autograd.functional import hvp, vhp, hessian from typing import List, Tuple,",
"-> Tuple[Tuple[Tensor, ...], List[str]]: \"\"\" This function removes all the Parameters from the",
"the Parameters from the model and return them as a tuple as well",
"be used again. Note that this function modifies the model in place and",
"grads = torch.autograd.grad(ctr_val, input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh): if torch.is_tensor(t): return",
"to define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device), self.shapes) vector_",
"the given list of names to value. For example, to set the attribute",
"def _gather(self, t, i, j): if isinstance(t, np.ndarray) or torch.is_tensor(t): return t[i:j] else:",
"{k: v for k, v in zip(names, new_params)}) out = apply_func(model, train_x) return",
"np.concatenate(t_list, dim) else: raise NotImplementedError def _gather(self, t, i, j): if isinstance(t, np.ndarray)",
"# named_params = {k: var.cpu().detach().numpy() for k, var in model.named_parameters()} params, names =",
"given list of names. For example, to delete the attribute obj.conv.weight, use _del_nested_attr(obj,",
"self), 'You must first call get input to define the tensors shapes.' input_var_",
"dir(func): self.device = func.device else: self.device = torch.device(device) if precision == 'float32': self.precision",
"device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor( vector, dtype=self.precision, device=self.device), self.shapes) if isinstance(input_var_, dict): input_var_",
"dict): input_var_ = tuple(input_var_.values()) if isinstance(vector_, dict): vector_ = tuple(vector_.values()) if isinstance(input_var_, list):",
"Remove all the parameters in the model names = [] for name, p",
"as np import torch from .base_wrapper import BaseWrapper from torch.autograd.functional import hvp, vhp,",
"or torch.is_tensor(t): return t[i:j] else: raise NotImplementedError def torch_function_factory(model, loss, train_x, train_y, precision='float32',",
"\"\"\" orig_params = [p for p in mod.parameters() if p.requires_grad] # Remove all",
"= torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model, {k: v for k, v in",
"train_x) return loss(out, train_y) func.device = device return func, [p.cpu().detach().numpy() for p in",
"np.ndarray) or torch.is_tensor(t): return t[i:j] else: raise NotImplementedError def torch_function_factory(model, loss, train_x, train_y,",
"tuple): return func(*input_) else: return func(input_) # Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py # Utilities to",
"if 'device' in dir(func): self.device = func.device else: self.device = torch.device(device) if precision",
"if isinstance(vector_, dict): vector_ = tuple(vector_.values()) if isinstance(input_var_, list): input_var_ = tuple(input_var_) if",
"create a function of the torch parameter model. :param model: torch model :type",
"dim) else: raise NotImplementedError def _gather(self, t, i, j): if isinstance(t, np.ndarray) or",
"from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py # Utilities to make nn.Module \"functional\" # In particular the goal",
"precision='float32', hvp_type='vhp', device='cpu'): self.func = func # Not very clean... if 'device' in",
"input_var, vector): assert 'shapes' in dir( self), 'You must first call get input",
"very clean... if 'device' in dir(func): self.device = func.device else: self.device = torch.device(device)",
"{k: var.cpu().detach().numpy() for k, var in model.named_parameters()} params, names = extract_weights(model) device =",
"vhp def get_value_and_grad(self, input_var): assert 'shapes' in dir( self), 'You must first call",
"isinstance(t, np.ndarray) or torch.is_tensor(t): return t[i:j] else: raise NotImplementedError def torch_function_factory(model, loss, train_x,",
"t, i, j): if isinstance(t, np.ndarray) or torch.is_tensor(t): return t[i:j] else: raise NotImplementedError",
"all the Parameters from the model and return them as a tuple as",
"np.ndarray): train_x = torch.tensor(train_x, dtype=prec_, device=device) if isinstance(train_y, np.ndarray): train_y = torch.tensor(train_y, dtype=prec_,",
"have history) and so are left as Tensors. This means that mod.parameters() will",
"'device' in dir(func): self.device = func.device else: self.device = torch.device(device) if precision ==",
"def _del_nested_attr(obj: nn.Module, names: List[str]) -> None: \"\"\" Deletes the attribute specified by",
"device return func, [p.cpu().detach().numpy() for p in params], names def apply_func(func, input_): if",
"grads = torch.autograd.grad(loss, input_var_grad) if isinstance(input_var_, dict): grads = {k: v for k,",
"isinstance(input_, list) or isinstance(input_, tuple): return func(*input_) else: return func(input_) # Adapted from",
"names to value. For example, to set the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv',",
"List, Tuple, Dict, Union, Callable from torch import nn, Tensor class TorchWrapper(BaseWrapper): def",
"first call get input to define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var,",
"weights must be re-loaded with `load_weights` before the model can be used again.",
"name.split(\".\")) names.append(name) # Make params regular Tensors instead of nn.Parameter params = tuple(p.detach().requires_grad_()",
"nn.Module using fixed inputs. def _del_nested_attr(obj: nn.Module, names: List[str]) -> None: \"\"\" Deletes",
"\"\"\" if len(names) == 1: delattr(obj, names[0]) else: _del_nested_attr(getattr(obj, names[0]), names[1:]) def _set_nested_attr(obj:",
"re-loaded with `load_weights` before the model can be used again. Note that this",
"again to perform a forward pass. Note that the `params` are regular Tensors",
"nn.Module, names: List[str], value: Tensor) -> None: \"\"\" Set the attribute specified by",
"of the loss :type train_y: np.ndarray :return: (function of the parameters, list of",
":param train_x: dataset used as input of the model :type train_x: np.ndarray :param",
"list) or isinstance(input_, tuple): return func(*input_) else: return func(input_) # Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py",
"inputs. def _del_nested_attr(obj: nn.Module, names: List[str]) -> None: \"\"\" Deletes the attribute specified",
"return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self, input_var): assert 'shapes' in dir( self), 'You must first",
"before the model can be used again. Note that this function modifies the",
"in model.named_parameters()} params, names = extract_weights(model) device = params[0].device prec_ = torch.float32 if",
"k, var in model.named_parameters()} params, names = extract_weights(model) device = params[0].device prec_ =",
"in dir(func): self.device = func.device else: self.device = torch.device(device) if precision == 'float32':",
"the model :type train_x: np.ndarray :param train_y: dataset used as ground truth input",
"else: _del_nested_attr(getattr(obj, names[0]), names[1:]) def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None:",
"forward pass. Note that the `params` are regular Tensors (that can have history)",
"that takes as input # the parameters and evaluate the nn.Module using fixed",
"as well as their original attribute names. The weights must be re-loaded with",
"modifies the model in place and after this call, mod.parameters() will be empty.",
"\"\"\" # named_params = {k: var.cpu().detach().numpy() for k, var in model.named_parameters()} params, names",
"input # the parameters and evaluate the nn.Module using fixed inputs. def _del_nested_attr(obj:",
"return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var, vector): assert 'shapes' in dir( self), 'You",
"Tensor class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'): self.func = func #",
"tuple(vector_) loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self, input_var): assert",
"list of names. For example, to delete the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv',",
"model in place and after this call, mod.parameters() will be empty. \"\"\" orig_params",
"shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss = self._eval_func(input_var_) input_var_grad",
"t_list, dim=0): if torch.is_tensor(t_list[0]): return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim)",
"dtype=self.precision, device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor( vector, dtype=self.precision, device=self.device), self.shapes) if isinstance(input_var_, dict):",
"__init__(self, func, precision='float32', hvp_type='vhp', device='cpu'): self.func = func # Not very clean... if",
"= {k: v for k, v in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def",
"dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim) else: raise NotImplementedError def _gather(self, t,",
"precision='float32', optimized_vars=None): \"\"\" A factory to create a function of the torch parameter",
"np import torch from .base_wrapper import BaseWrapper from torch.autograd.functional import hvp, vhp, hessian",
"torch.float64 if isinstance(train_x, np.ndarray): train_x = torch.tensor(train_x, dtype=prec_, device=device) if isinstance(train_y, np.ndarray): train_y",
"self.shapes) ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_",
"are left as Tensors. This means that mod.parameters() will still be empty after",
"= tuple(input_var_) if isinstance(vector_, list): vector_ = tuple(vector_) loss, vhp_res = self.hvp_func(self.func, input_var_,",
"torch.tensor(train_x, dtype=prec_, device=device) if isinstance(train_y, np.ndarray): train_y = torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params):",
"else: raise NotImplementedError def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None): \"\"\" A factory",
"to perform a forward pass. Note that the `params` are regular Tensors (that",
"np.ndarray :param train_y: dataset used as ground truth input of the loss :type",
"torch.nn.Modle] :param loss: a function with signature loss_value = loss(pred_y, true_y). :type loss:",
"names. For example, to delete the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight']) \"\"\"",
"p in list(mod.named_parameters()): if p.requires_grad: _del_nested_attr(mod, name.split(\".\")) names.append(name) # Make params regular Tensors",
"in place and after this call, mod.parameters() will be empty. \"\"\" orig_params =",
"the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight']) \"\"\" if len(names) == 1: delattr(obj,",
"as their original attribute names. The weights must be re-loaded with `load_weights` before",
"def get_ctr_jac(self, input_var): assert 'shapes' in dir( self), 'You must first call get",
"in params], names def apply_func(func, input_): if isinstance(input_, dict): return func(**input_) elif isinstance(input_,",
"first call get input to define the tensors shapes.' input_var_ = torch.tensor( input_var,",
"is to be able to provide a function that takes as input #",
"parameters, list of parameters, names of parameters) :rtype: tuple \"\"\" # named_params =",
"var in model.named_parameters()} params, names = extract_weights(model) device = params[0].device prec_ = torch.float32",
"isinstance(t, np.ndarray): return np.reshape(t, sh) else: raise NotImplementedError def _tconcat(self, t_list, dim=0): if",
"k, v in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var, vector): assert",
"= func.device else: self.device = torch.device(device) if precision == 'float32': self.precision = torch.float32",
"_gather(self, t, i, j): if isinstance(t, np.ndarray) or torch.is_tensor(t): return t[i:j] else: raise",
"= hvp if hvp_type == 'hvp' else vhp def get_value_and_grad(self, input_var): assert 'shapes'",
"(function of the parameters, list of parameters, names of parameters) :rtype: tuple \"\"\"",
"if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(ctr_val, input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def",
"= {k: var.cpu().detach().numpy() for k, var in model.named_parameters()} params, names = extract_weights(model) device",
"tuple(p.detach().requires_grad_() for p in orig_params) return params, names def load_weights(mod: nn.Module, params: Dict[str,",
"if isinstance(input_var_, dict): input_var_ = tuple(input_var_.values()) if isinstance(vector_, dict): vector_ = tuple(vector_.values()) if",
"Tuple[Tuple[Tensor, ...], List[str]]: \"\"\" This function removes all the Parameters from the model",
"original attribute names. The weights must be re-loaded with `load_weights` before the model",
"for k, v in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var, vector):",
"Reload a set of weights so that `mod` can be used again to",
"input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh): if torch.is_tensor(t): return t.reshape(sh) elif isinstance(t,",
"np.ndarray :return: (function of the parameters, list of parameters, names of parameters) :rtype:",
"if precision == 'float32' else torch.float64 if isinstance(train_x, np.ndarray): train_x = torch.tensor(train_x, dtype=prec_,",
"...], List[str]]: \"\"\" This function removes all the Parameters from the model and",
"\"\"\" A factory to create a function of the torch parameter model. :param",
"a forward pass. Note that the `params` are regular Tensors (that can have",
"example, to delete the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight']) \"\"\" if len(names)",
"value) def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]: \"\"\" This function removes all",
"The weights must be re-loaded with `load_weights` before the model can be used",
"if isinstance(input_, dict): return func(**input_) elif isinstance(input_, list) or isinstance(input_, tuple): return func(*input_)",
"use _del_nested_attr(obj, ['conv', 'weight'], value) \"\"\" if len(names) == 1: setattr(obj, names[0], value)",
"def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None: \"\"\" Reload a set of",
"names[1:]) def _set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None: \"\"\" Set the",
"model: torch model :type model: torch.nn.Modle] :param loss: a function with signature loss_value",
"and after this call, mod.parameters() will be empty. \"\"\" orig_params = [p for",
"them as a tuple as well as their original attribute names. The weights",
"nn.Module, names: List[str]) -> None: \"\"\" Deletes the attribute specified by the given",
"names def apply_func(func, input_): if isinstance(input_, dict): return func(**input_) elif isinstance(input_, list) or",
"define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) ctr_val",
"self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(loss,",
"input_var, dtype=self.precision, device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor( vector, dtype=self.precision, device=self.device), self.shapes) if isinstance(input_var_,",
"with signature loss_value = loss(pred_y, true_y). :type loss: function :param train_x: dataset used",
"NotImplementedError def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None): \"\"\" A factory to create",
"regular Tensors instead of nn.Parameter params = tuple(p.detach().requires_grad_() for p in orig_params) return",
"with `load_weights` before the model can be used again. Note that this function",
"orig_params) return params, names def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None: \"\"\"",
"zip(names, new_params)}) out = apply_func(model, train_x) return loss(out, train_y) func.device = device return",
"len(names) == 1: setattr(obj, names[0], value) else: _set_nested_attr(getattr(obj, names[0]), names[1:], value) def extract_weights(mod:",
"function modifies the model in place and after this call, mod.parameters() will be",
"[loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var, vector): assert 'shapes' in dir( self), 'You must",
"attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight']) \"\"\" if len(names) == 1: delattr(obj, names[0])",
"define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss",
"v in zip(names, new_params)}) out = apply_func(model, train_x) return loss(out, train_y) func.device =",
"= input_var_.values() if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(ctr_val, input_var_grad) return",
"true_y). :type loss: function :param train_x: dataset used as input of the model",
"`load_weights` before the model can be used again. Note that this function modifies",
"train_y = torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model, {k: v for k, v",
"parameter model. :param model: torch model :type model: torch.nn.Modle] :param loss: a function",
"def func(*new_params): load_weights(model, {k: v for k, v in zip(names, new_params)}) out =",
"= torch.autograd.grad(ctr_val, input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh): if torch.is_tensor(t): return t.reshape(sh)",
"names[1:], value) def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]: \"\"\" This function removes",
"the model in place and after this call, mod.parameters() will be empty. \"\"\"",
"tuple(input_var_) if isinstance(vector_, list): vector_ = tuple(vector_) loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_)",
"https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py # Utilities to make nn.Module \"functional\" # In particular the goal is",
"= tuple(input_var_.values()) if isinstance(vector_, dict): vector_ = tuple(vector_.values()) if isinstance(input_var_, list): input_var_ =",
"in orig_params) return params, names def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None:",
"torch.device(device) if precision == 'float32': self.precision = torch.float32 elif precision == 'float64': self.precision",
"= self.hvp_func(self.func, input_var_, v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self, input_var): assert 'shapes' in dir(",
"return np.reshape(t, sh) else: raise NotImplementedError def _tconcat(self, t_list, dim=0): if torch.is_tensor(t_list[0]): return",
":type loss: function :param train_x: dataset used as input of the model :type",
"torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model, {k: v for k, v in zip(names,",
"self.hvp_func = hvp if hvp_type == 'hvp' else vhp def get_value_and_grad(self, input_var): assert",
"= self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads =",
"if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(loss, input_var_grad) if isinstance(input_var_, dict):",
"= extract_weights(model) device = params[0].device prec_ = torch.float32 if precision == 'float32' else",
"set of weights so that `mod` can be used again to perform a",
"loss = self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads",
"by the given list of names. For example, to delete the attribute obj.conv.weight,",
"be empty after this call. \"\"\" for name, p in params.items(): _set_nested_attr(mod, name.split(\".\"),",
"def _tconcat(self, t_list, dim=0): if torch.is_tensor(t_list[0]): return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return",
"torch.autograd.functional import hvp, vhp, hessian from typing import List, Tuple, Dict, Union, Callable",
"== 'float32': self.precision = torch.float32 elif precision == 'float64': self.precision = torch.float64 else:",
"self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss = self._eval_func(input_var_) input_var_grad = input_var_.values() if",
"import torch from .base_wrapper import BaseWrapper from torch.autograd.functional import hvp, vhp, hessian from",
"out = apply_func(model, train_x) return loss(out, train_y) func.device = device return func, [p.cpu().detach().numpy()",
"v for k, v in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var,",
"input_var_, dict) else input_var_ grads = torch.autograd.grad(loss, input_var_grad) if isinstance(input_var_, dict): grads =",
"isinstance(input_var_, dict): input_var_ = tuple(input_var_.values()) if isinstance(vector_, dict): vector_ = tuple(vector_.values()) if isinstance(input_var_,",
"dim=0): if torch.is_tensor(t_list[0]): return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim) else:",
"grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var, vector): assert 'shapes' in dir( self),",
"elif precision == 'float64': self.precision = torch.float64 else: raise ValueError self.hvp_func = hvp",
"isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim) else: raise NotImplementedError def _gather(self, t, i, j):",
"None: \"\"\" Set the attribute specified by the given list of names to",
"define the tensors shapes.' input_var_ = torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp): return",
"return params, names def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None: \"\"\" Reload",
"= device return func, [p.cpu().detach().numpy() for p in params], names def apply_func(func, input_):",
"load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None: \"\"\" Reload a set of weights",
"so that `mod` can be used again to perform a forward pass. Note",
"NotImplementedError def _gather(self, t, i, j): if isinstance(t, np.ndarray) or torch.is_tensor(t): return t[i:j]",
"def get_hvp(self, input_var, vector): assert 'shapes' in dir( self), 'You must first call",
"Deletes the attribute specified by the given list of names. For example, to",
"-> None: \"\"\" Deletes the attribute specified by the given list of names.",
"loss :type train_y: np.ndarray :return: (function of the parameters, list of parameters, names",
"empty after this call. \"\"\" for name, p in params.items(): _set_nested_attr(mod, name.split(\".\"), p)",
"from torch.autograd.functional import hvp, vhp, hessian from typing import List, Tuple, Dict, Union,",
"of the torch parameter model. :param model: torch model :type model: torch.nn.Modle] :param",
"device=self.device), self.shapes) ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else",
"the loss :type train_y: np.ndarray :return: (function of the parameters, list of parameters,",
"_del_nested_attr(obj, ['conv', 'weight']) \"\"\" if len(names) == 1: delattr(obj, names[0]) else: _del_nested_attr(getattr(obj, names[0]),",
"nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]: \"\"\" This function removes all the Parameters from",
"self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(ctr_val,",
"setattr(obj, names[0], value) else: _set_nested_attr(getattr(obj, names[0]), names[1:], value) def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor,",
"isinstance(vector_, list): vector_ = tuple(vector_) loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64)",
"dict): grads = {k: v for k, v in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64),",
"return t[i:j] else: raise NotImplementedError def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None): \"\"\"",
"input_var, dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp, self.shapes)) hess = hessian(func, input_var_, vectorize=False)",
"names def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None: \"\"\" Reload a set",
"regular Tensors (that can have history) and so are left as Tensors. This",
"sh): if torch.is_tensor(t): return t.reshape(sh) elif isinstance(t, np.ndarray): return np.reshape(t, sh) else: raise",
"the given list of names. For example, to delete the attribute obj.conv.weight, use",
"else: return func(input_) # Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py # Utilities to make nn.Module \"functional\"",
"function removes all the Parameters from the model and return them as a",
"BaseWrapper from torch.autograd.functional import hvp, vhp, hessian from typing import List, Tuple, Dict,",
"dict) else input_var_ grads = torch.autograd.grad(ctr_val, input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh):",
"func.device = device return func, [p.cpu().detach().numpy() for p in params], names def apply_func(func,",
"to provide a function that takes as input # the parameters and evaluate",
"must first call get input to define the tensors shapes.' input_var_ = torch.tensor(",
"if len(names) == 1: setattr(obj, names[0], value) else: _set_nested_attr(getattr(obj, names[0]), names[1:], value) def",
"place and after this call, mod.parameters() will be empty. \"\"\" orig_params = [p",
"requires_grad=True, device=self.device), self.shapes) ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict)",
"params, names def load_weights(mod: nn.Module, params: Dict[str, Tensor]) -> None: \"\"\" Reload a",
"for p in params], names def apply_func(func, input_): if isinstance(input_, dict): return func(**input_)",
"len(names) == 1: delattr(obj, names[0]) else: _del_nested_attr(getattr(obj, names[0]), names[1:]) def _set_nested_attr(obj: nn.Module, names:",
"`mod` can be used again to perform a forward pass. Note that the",
"np.reshape(t, sh) else: raise NotImplementedError def _tconcat(self, t_list, dim=0): if torch.is_tensor(t_list[0]): return torch.cat(t_list,",
"shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor( vector, dtype=self.precision,",
"dict): vector_ = tuple(vector_.values()) if isinstance(input_var_, list): input_var_ = tuple(input_var_) if isinstance(vector_, list):",
"model. :param model: torch model :type model: torch.nn.Modle] :param loss: a function with",
"factory to create a function of the torch parameter model. :param model: torch",
"input_): if isinstance(input_, dict): return func(**input_) elif isinstance(input_, list) or isinstance(input_, tuple): return",
"mod.parameters() will still be empty after this call. \"\"\" for name, p in",
":param model: torch model :type model: torch.nn.Modle] :param loss: a function with signature",
"'weight'], value) \"\"\" if len(names) == 1: setattr(obj, names[0], value) else: _set_nested_attr(getattr(obj, names[0]),",
"Parameters from the model and return them as a tuple as well as",
"= tuple(vector_.values()) if isinstance(input_var_, list): input_var_ = tuple(input_var_) if isinstance(vector_, list): vector_ =",
"dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss = self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_,",
"torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None): \"\"\" A factory to create a function",
"if isinstance(input_var_, list): input_var_ = tuple(input_var_) if isinstance(vector_, list): vector_ = tuple(vector_) loss,",
"v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self, input_var): assert 'shapes' in dir( self), 'You must",
"torch.is_tensor(t_list[0]): return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim) else: raise NotImplementedError",
"Dict[str, Tensor]) -> None: \"\"\" Reload a set of weights so that `mod`",
"For example, to delete the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight']) \"\"\" if",
"if p.requires_grad] # Remove all the parameters in the model names = []",
"get input to define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device),",
"as input # the parameters and evaluate the nn.Module using fixed inputs. def",
"provide a function that takes as input # the parameters and evaluate the",
"grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh): if torch.is_tensor(t): return t.reshape(sh) elif isinstance(t, np.ndarray): return",
"Tuple, Dict, Union, Callable from torch import nn, Tensor class TorchWrapper(BaseWrapper): def __init__(self,",
"ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads",
"A factory to create a function of the torch parameter model. :param model:",
"make nn.Module \"functional\" # In particular the goal is to be able to",
"Tensor]) -> None: \"\"\" Reload a set of weights so that `mod` can",
"if isinstance(train_y, np.ndarray): train_y = torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model, {k: v",
"optimized_vars=None): \"\"\" A factory to create a function of the torch parameter model.",
"p in orig_params) return params, names def load_weights(mod: nn.Module, params: Dict[str, Tensor]) ->",
"after this call, mod.parameters() will be empty. \"\"\" orig_params = [p for p",
"'You must first call get input to define the tensors shapes.' input_var_ =",
"== 1: delattr(obj, names[0]) else: _del_nested_attr(getattr(obj, names[0]), names[1:]) def _set_nested_attr(obj: nn.Module, names: List[str],",
"\"\"\" Set the attribute specified by the given list of names to value.",
"\"functional\" # In particular the goal is to be able to provide a",
"loss_value = loss(pred_y, true_y). :type loss: function :param train_x: dataset used as input",
"Not very clean... if 'device' in dir(func): self.device = func.device else: self.device =",
"given list of names to value. For example, to set the attribute obj.conv.weight,",
"nn.Module \"functional\" # In particular the goal is to be able to provide",
"self.shapes)) hess = hessian(func, input_var_, vectorize=False) return hess.cpu().detach().numpy().astype(np.float64) def get_ctr_jac(self, input_var): assert 'shapes'",
"get_value_and_grad(self, input_var): assert 'shapes' in dir( self), 'You must first call get input",
"raise ValueError self.hvp_func = hvp if hvp_type == 'hvp' else vhp def get_value_and_grad(self,",
"new_params)}) out = apply_func(model, train_x) return loss(out, train_y) func.device = device return func,",
"attribute specified by the given list of names to value. For example, to",
"must be re-loaded with `load_weights` before the model can be used again. Note",
"input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) loss = self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance(",
"a function of the torch parameter model. :param model: torch model :type model:",
"and so are left as Tensors. This means that mod.parameters() will still be",
"from the model and return them as a tuple as well as their",
"else: _set_nested_attr(getattr(obj, names[0]), names[1:], value) def extract_weights(mod: nn.Module) -> Tuple[Tuple[Tensor, ...], List[str]]: \"\"\"",
".base_wrapper import BaseWrapper from torch.autograd.functional import hvp, vhp, hessian from typing import List,",
"train_y) func.device = device return func, [p.cpu().detach().numpy() for p in params], names def",
"list): input_var_ = tuple(input_var_) if isinstance(vector_, list): vector_ = tuple(vector_) loss, vhp_res =",
"shapes.' input_var_ = torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp, self.shapes)) hess",
"def _reshape(self, t, sh): if torch.is_tensor(t): return t.reshape(sh) elif isinstance(t, np.ndarray): return np.reshape(t,",
"if isinstance(t, np.ndarray) or torch.is_tensor(t): return t[i:j] else: raise NotImplementedError def torch_function_factory(model, loss,",
"loss: a function with signature loss_value = loss(pred_y, true_y). :type loss: function :param",
"function with signature loss_value = loss(pred_y, true_y). :type loss: function :param train_x: dataset",
"# Utilities to make nn.Module \"functional\" # In particular the goal is to",
"v for k, v in zip(names, new_params)}) out = apply_func(model, train_x) return loss(out,",
"input_var_ = torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp, self.shapes)) hess =",
"tuple(vector_.values()) if isinstance(input_var_, list): input_var_ = tuple(input_var_) if isinstance(vector_, list): vector_ = tuple(vector_)",
"Callable from torch import nn, Tensor class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32', hvp_type='vhp',",
"in zip(names, new_params)}) out = apply_func(model, train_x) return loss(out, train_y) func.device = device",
"train_y, precision='float32', optimized_vars=None): \"\"\" A factory to create a function of the torch",
"that `mod` can be used again to perform a forward pass. Note that",
"raise NotImplementedError def _gather(self, t, i, j): if isinstance(t, np.ndarray) or torch.is_tensor(t): return",
"_del_nested_attr(mod, name.split(\".\")) names.append(name) # Make params regular Tensors instead of nn.Parameter params =",
"device=self.device), self.shapes) loss = self._eval_func(input_var_) input_var_grad = input_var_.values() if isinstance( input_var_, dict) else",
"return loss(out, train_y) func.device = device return func, [p.cpu().detach().numpy() for p in params],",
"input_var_grad = input_var_.values() if isinstance( input_var_, dict) else input_var_ grads = torch.autograd.grad(loss, input_var_grad)",
"dtype=prec_, device=device) if isinstance(train_y, np.ndarray): train_y = torch.tensor(train_y, dtype=prec_, device=device) def func(*new_params): load_weights(model,",
"fixed inputs. def _del_nested_attr(obj: nn.Module, names: List[str]) -> None: \"\"\" Deletes the attribute",
"train_x: np.ndarray :param train_y: dataset used as ground truth input of the loss",
"[p.cpu().detach().numpy() for p in params], names def apply_func(func, input_): if isinstance(input_, dict): return",
"of names to value. For example, to set the attribute obj.conv.weight, use _del_nested_attr(obj,",
"t, sh): if torch.is_tensor(t): return t.reshape(sh) elif isinstance(t, np.ndarray): return np.reshape(t, sh) else:",
"\"\"\" Reload a set of weights so that `mod` can be used again",
"'float32' else torch.float64 if isinstance(train_x, np.ndarray): train_x = torch.tensor(train_x, dtype=prec_, device=device) if isinstance(train_y,",
"Set the attribute specified by the given list of names to value. For",
"'shapes' in dir( self), 'You must first call get input to define the",
"dtype=prec_, device=device) def func(*new_params): load_weights(model, {k: v for k, v in zip(names, new_params)})",
"names = [] for name, p in list(mod.named_parameters()): if p.requires_grad: _del_nested_attr(mod, name.split(\".\")) names.append(name)",
"torch import nn, Tensor class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'): self.func",
"tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) ctr_val = self._eval_ctr_func(input_var_)",
"if isinstance(vector_, list): vector_ = tuple(vector_) loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_) return",
"train_y: dataset used as ground truth input of the loss :type train_y: np.ndarray",
"using fixed inputs. def _del_nested_attr(obj: nn.Module, names: List[str]) -> None: \"\"\" Deletes the",
"t[i:j] else: raise NotImplementedError def torch_function_factory(model, loss, train_x, train_y, precision='float32', optimized_vars=None): \"\"\" A",
"function :param train_x: dataset used as input of the model :type train_x: np.ndarray",
"particular the goal is to be able to provide a function that takes",
"dict) else input_var_ grads = torch.autograd.grad(loss, input_var_grad) if isinstance(input_var_, dict): grads = {k:",
"vectorize=False) return hess.cpu().detach().numpy().astype(np.float64) def get_ctr_jac(self, input_var): assert 'shapes' in dir( self), 'You must",
"return t.reshape(sh) elif isinstance(t, np.ndarray): return np.reshape(t, sh) else: raise NotImplementedError def _tconcat(self,",
"the goal is to be able to provide a function that takes as",
"params: Dict[str, Tensor]) -> None: \"\"\" Reload a set of weights so that",
"t.reshape(sh) elif isinstance(t, np.ndarray): return np.reshape(t, sh) else: raise NotImplementedError def _tconcat(self, t_list,",
"input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if isinstance(",
"of parameters) :rtype: tuple \"\"\" # named_params = {k: var.cpu().detach().numpy() for k, var",
"can have history) and so are left as Tensors. This means that mod.parameters()",
"= self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values()",
"in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self, input_var, vector): assert 'shapes' in",
"dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp, self.shapes)) hess = hessian(func, input_var_, vectorize=False) return",
"= [p for p in mod.parameters() if p.requires_grad] # Remove all the parameters",
"ground truth input of the loss :type train_y: np.ndarray :return: (function of the",
"_set_nested_attr(obj: nn.Module, names: List[str], value: Tensor) -> None: \"\"\" Set the attribute specified",
"return torch.cat(t_list, dim) elif isinstance(t_list[0], np.ndarray): return np.concatenate(t_list, dim) else: raise NotImplementedError def",
"train_x = torch.tensor(train_x, dtype=prec_, device=device) if isinstance(train_y, np.ndarray): train_y = torch.tensor(train_y, dtype=prec_, device=device)",
"parameters) :rtype: tuple \"\"\" # named_params = {k: var.cpu().detach().numpy() for k, var in",
"parameters and evaluate the nn.Module using fixed inputs. def _del_nested_attr(obj: nn.Module, names: List[str])",
"self._unconcat(torch.tensor( input_var, dtype=self.precision, requires_grad=True, device=self.device), self.shapes) ctr_val = self._eval_ctr_func(input_var_) input_var_grad = input_var_.values() if",
"_del_nested_attr(obj: nn.Module, names: List[str]) -> None: \"\"\" Deletes the attribute specified by the",
"else input_var_ grads = torch.autograd.grad(ctr_val, input_var_grad) return grads.cpu().detach().numpy().astype(np.float64) def _reshape(self, t, sh): if",
"hvp_type='vhp', device='cpu'): self.func = func # Not very clean... if 'device' in dir(func):",
"nn, Tensor class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'): self.func = func",
"List[str]) -> None: \"\"\" Deletes the attribute specified by the given list of",
"hvp if hvp_type == 'hvp' else vhp def get_value_and_grad(self, input_var): assert 'shapes' in",
"to define the tensors shapes.' input_var_ = torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp):",
"vector_ = tuple(vector_) loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self,",
"= torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp, self.shapes)) hess = hessian(func,",
"be re-loaded with `load_weights` before the model can be used again. Note that",
"self.shapes) vector_ = self._unconcat(torch.tensor( vector, dtype=self.precision, device=self.device), self.shapes) if isinstance(input_var_, dict): input_var_ =",
"else: raise ValueError self.hvp_func = hvp if hvp_type == 'hvp' else vhp def",
"loss, vhp_res = self.hvp_func(self.func, input_var_, v=vector_) return self._concat(vhp_res)[0].cpu().detach().numpy().astype(np.float64) def get_hess(self, input_var): assert 'shapes'",
"get_ctr_jac(self, input_var): assert 'shapes' in dir( self), 'You must first call get input",
"delete the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight']) \"\"\" if len(names) == 1:",
"value. For example, to set the attribute obj.conv.weight, use _del_nested_attr(obj, ['conv', 'weight'], value)",
"precision == 'float32': self.precision = torch.float32 elif precision == 'float64': self.precision = torch.float64",
"{k: v for k, v in zip(input_var_.keys(), grads)} return [loss.cpu().detach().numpy().astype(np.float64), self._concat(grads)[0].cpu().detach().numpy().astype(np.float64)] def get_hvp(self,",
"value: Tensor) -> None: \"\"\" Set the attribute specified by the given list",
"self._unconcat(torch.tensor( vector, dtype=self.precision, device=self.device), self.shapes) if isinstance(input_var_, dict): input_var_ = tuple(input_var_.values()) if isinstance(vector_,",
"import nn, Tensor class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32', hvp_type='vhp', device='cpu'): self.func =",
"removes all the Parameters from the model and return them as a tuple",
"names.append(name) # Make params regular Tensors instead of nn.Parameter params = tuple(p.detach().requires_grad_() for",
"isinstance(vector_, dict): vector_ = tuple(vector_.values()) if isinstance(input_var_, list): input_var_ = tuple(input_var_) if isinstance(vector_,",
"model.named_parameters()} params, names = extract_weights(model) device = params[0].device prec_ = torch.float32 if precision",
"hess = hessian(func, input_var_, vectorize=False) return hess.cpu().detach().numpy().astype(np.float64) def get_ctr_jac(self, input_var): assert 'shapes' in",
"isinstance(input_, dict): return func(**input_) elif isinstance(input_, list) or isinstance(input_, tuple): return func(*input_) else:",
"None: \"\"\" Reload a set of weights so that `mod` can be used",
"left as Tensors. This means that mod.parameters() will still be empty after this",
"of weights so that `mod` can be used again to perform a forward",
"self.device = func.device else: self.device = torch.device(device) if precision == 'float32': self.precision =",
"that the `params` are regular Tensors (that can have history) and so are",
"for k, var in model.named_parameters()} params, names = extract_weights(model) device = params[0].device prec_",
"tensors shapes.' input_var_ = torch.tensor( input_var, dtype=self.precision, device=self.device) def func(inp): return self._eval_func(self._unconcat(inp, self.shapes))",
"self.precision = torch.float32 elif precision == 'float64': self.precision = torch.float64 else: raise ValueError",
"define the tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device), self.shapes) vector_ =",
"k, v in zip(names, new_params)}) out = apply_func(model, train_x) return loss(out, train_y) func.device",
"a function with signature loss_value = loss(pred_y, true_y). :type loss: function :param train_x:",
"model can be used again. Note that this function modifies the model in",
"that mod.parameters() will still be empty after this call. \"\"\" for name, p",
"Tensors instead of nn.Parameter params = tuple(p.detach().requires_grad_() for p in orig_params) return params,",
"return func(input_) # Adapted from https://github.com/pytorch/pytorch/blob/21c04b4438a766cd998fddb42247d4eb2e010f9a/benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py # Utilities to make nn.Module \"functional\" #",
"Union, Callable from torch import nn, Tensor class TorchWrapper(BaseWrapper): def __init__(self, func, precision='float32',",
"\"\"\" This function removes all the Parameters from the model and return them",
"model: torch.nn.Modle] :param loss: a function with signature loss_value = loss(pred_y, true_y). :type",
"tensors shapes.' input_var_ = self._unconcat(torch.tensor( input_var, dtype=self.precision, device=self.device), self.shapes) vector_ = self._unconcat(torch.tensor( vector,",
"Utilities to make nn.Module \"functional\" # In particular the goal is to be"
] |
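
get_value_and_grad above returns [loss, gradient] as float64 NumPy values, which is exactly the (fun, jac) pairing that scipy.optimize.minimize consumes when called with jac=True. A minimal self-contained sketch of the same convention on a toy quadratic (the function and names below are illustrative, not part of the wrapper):

import numpy as np
import torch
from scipy.optimize import minimize

def value_and_grad(x_np):
    # Mirror the wrapper's convention: evaluate in torch, return
    # (float64 loss, float64 flat gradient) for scipy.
    x = torch.tensor(x_np, dtype=torch.float32, requires_grad=True)
    loss = ((x - 3.0) ** 2).sum()  # toy objective, minimum at x = 3
    grad, = torch.autograd.grad(loss, x)
    return (loss.detach().numpy().astype(np.float64),
            grad.detach().numpy().astype(np.float64))

res = minimize(value_and_grad, x0=np.zeros(4), jac=True, method='L-BFGS-B')
print(res.x)  # should approach [3. 3. 3. 3.]
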
import pytest
from montecarlodlgapp import MonteCarloAppDrop, MyDataDROP

given = pytest.mark.parametrize


def test_myApp_class():
    first = MonteCarloAppDrop("a", "a")
    second = MonteCarloAppDrop("a", "a")
    second.randomSeed = 100
    first.initialize()
    second.initialize()
    pi_1 = first.run()
    assert pi_1 != second.run()


def test_myData_class():
    assert MyDataDROP("a", "a").getIO() == "Hello from MyDataDROP"


def test_myData_dataURL():
    assert MyDataDROP("a", "a").dataURL == "Hello from the dataURL method"
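
Assuming the tests above live in a file such as test_montecarlodlgapp.py (a hypothetical name) next to the montecarlodlgapp module, the suite runs with plain pytest:

pytest -q test_montecarlodlgapp.py

The first test depends on MonteCarloAppDrop seeding its RNG during initialize(): two drops with different randomSeed values are expected to produce different pi estimates.
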
import logging

from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee
from corehq.apps.unicel.api import UnicelBackend

logger = logging.getLogger('accounting')


class Command(LabelCommand):
    help = "bootstrap Unicel gateway fees"
    args = ""
    label = ""

    def handle(self, *labels, **options):
        SmsGatewayFee.create_new(UnicelBackend.get_api_id(), INCOMING, 0.50,
                                 currency=Currency.objects.get(code="INR"))
        SmsGatewayFee.create_new(UnicelBackend.get_api_id(), OUTGOING, 0.50,
                                 currency=Currency.objects.get(code="INR"))
        logger.info("Updated Unicel gateway fees.")
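
Django derives a management command's name from its file name under management/commands/; assuming this module were saved as bootstrap_unicel_gateway_fees.py (a hypothetical name, the real file name is not shown here), it would be invoked as:

python manage.py bootstrap_unicel_gateway_fees

handle() calls create_new once per message direction (INCOMING and OUTGOING), both at 0.50 INR.
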
import os
import os.path
from inspect import getmembers

import manhole

from .log_init import init_logging
from .web import Web
from ..core.playbooks.playbooks_event_handler_impl import PlaybooksEventHandlerImpl
from .. import api as robusta_api
from .config_loader import ConfigLoader
from ..model.config import Registry


def main():
    init_logging()
    registry = Registry()
    event_handler = PlaybooksEventHandlerImpl(registry)
    loader = ConfigLoader(registry, event_handler)

    if os.environ.get("ENABLE_MANHOLE", "false").lower() == "true":
        manhole.install(locals=dict(getmembers(robusta_api)))

    Web.init(event_handler)
    Web.run()  # blocking
    loader.close()


if __name__ == "__main__":
    main()
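
main() gates the manhole debugging console on an environment variable rather than on configuration; assuming the module is importable as a package entry point (the exact package path is not shown here), enabling it would look something like:

ENABLE_MANHOLE=true python -m <package>.main

The manhole.install(locals=dict(getmembers(robusta_api))) call injects every member of the api module into the debug shell's namespace, so an operator attaching to the manhole socket can exercise the API directly.
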
from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated

from .serializers import TagSerializer, IngredientSerializer, RecipeSerializer
from core.models import Tag, Ingredient, Recipe


class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
                            mixins.ListModelMixin,
                            mixins.CreateModelMixin):
    """Base class for user-owned recipe attributes"""
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Return objects for the current authenticated user only"""
        qs = super(BaseRecipeAttrViewSet, self).get_queryset()
        return qs.filter(user=self.request.user).order_by('-name')

    def perform_create(self, serializer):
        """Create a new object"""
        serializer.save(user=self.request.user)


class TagViewSet(BaseRecipeAttrViewSet):
    """Manage tags in the database"""
    serializer_class = TagSerializer
    queryset = Tag.objects.all()


class IngredientViewSet(BaseRecipeAttrViewSet):
    """Manage ingredients in the database"""
    serializer_class = IngredientSerializer
    queryset = Ingredient.objects.all()


class RecipeViewSet(viewsets.ModelViewSet):
    """Manage Recipe in the database"""
    serializer_class = RecipeSerializer
    queryset = Recipe.objects.all()
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    # Must override because the base implementation orders by name
    def get_queryset(self):
        """Retrieve the recipes for the authenticated user"""
        qs = super(RecipeViewSet, self).get_queryset()
        return qs.filter(user=self.request.user)
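
The viewsets above carry no URL routing of their own; the conventional way to expose them is a DRF router. A sketch of the wiring, assuming a urls.py in the same app (the URL prefixes are illustrative):

from django.urls import path, include
from rest_framework.routers import DefaultRouter

from . import views

router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipes', views.RecipeViewSet)

urlpatterns = [path('', include(router.urls))]

Because each viewset defines a queryset attribute, the router can derive the basename automatically.
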
""" Serializers used by the sample app """
from authentic.entities import Account
from protean.context import context
from protean_flask.core.serializers import EntitySerializer
from protean_flask.core.serializers import ma

from .entities import Human


class AccountSerializer(EntitySerializer):
    """ Serializer for Account Entity"""
    id = ma.fields.Integer()

    class Meta:
        entity = Account
        fields = ('id', 'name', 'username', 'email', 'title', 'phone',
                  'timezone', 'is_locked', 'is_active', 'is_verified')


class HumanSerializer(EntitySerializer):
    """ Serializer for Human Entity"""
    current_account = ma.fields.Method('get_current_account')

    def get_current_account(self, obj):
        """ Return the current logged in user """
        if context.account:
            return context.account.id
        else:
            return None

    class Meta:
        entity = Human
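
The current_account attribute above uses a Method field, which delegates serialization to a named method on the schema; the ma object imported here appears to expose the marshmallow fields API. A standalone illustration with plain marshmallow 3 (names are illustrative):

from marshmallow import Schema, fields

class GreetingSchema(Schema):
    shout = fields.Method('get_shout')

    def get_shout(self, obj):
        # Computed at dump time from the object being serialized
        return obj['word'].upper()

print(GreetingSchema().dump({'word': 'hi'}))  # {'shout': 'HI'}
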
[
"def main(): # logging.info(\"Collecting lang/locale count, per tag, for the given hashtags: {0}\".format('",
"the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list): # print()",
"tweepy import pymongo import json # TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB",
"configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth,",
"[] insert_ids = [] for user in user_list: logging.info(\"looking up for user {0}\".format(user['user']))",
"# api_auth = configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection,",
"'lang':1}) for user in user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user'] user_data['lang'] =",
"hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() # user_locale_list",
"mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents = user_col.count() if not count_documents == 0:",
"= user['hashtag'] user_data['user'] = user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data = {} user_data",
"MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS,",
"#### NOT WORKING import logging import configs import tweepy import pymongo import json",
"print() # def get_locale_by_tag(api, db_connection, hashtags_list): # print() #### NOT WORKING # def",
"mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag stored into the collection",
"tag, for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() # mongodb_connection",
"hashtags_list): # print() # def get_locale_by_tag(api, db_connection, hashtags_list): # print() #### NOT WORKING",
"per tag, for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() #",
"get_user_info\") filtered_user_list = [] insert_ids = [] for user in user_list: logging.info(\"looking up",
"for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() # mongodb_connection =",
"user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list):",
"user_col = db_connection[mongo_col_user] count_documents = user_col.count() if not count_documents == 0: logging.info(\"Collection \\\"{0}\\\"",
"= user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list): # print() # def get_locale_by_tag(api,",
"user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth,",
"MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) #",
"user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao",
"configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) #",
"[] user_data = {} user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user in",
"user_locale_list): user_col = db_connection[mongo_col_user] count_documents = user_col.count() if not count_documents == 0: logging.info(\"Collection",
"= configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) #",
"# mongodb_connection = configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection,",
"count per tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if __name__ == \"__main__\":",
"get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection,",
"{0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() # user_locale_list =",
"user_data_list.append(user_data) user_data = {} user_data = user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data)",
"'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def",
"na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list = [] user_data = {} user_list",
"== 0: logging.info(\"Collection \\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user)",
"# def group_tweets_by_tag(api, db_connection, hashtags_list): # print() # def get_locale_by_tag(api, db_connection, hashtags_list): #",
"def group_tweets_by_tag(api, db_connection, hashtags_list): # print() # def get_locale_by_tag(api, db_connection, hashtags_list): # print()",
"were deleted from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection,",
"{} user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user in user_list: user_data['hashtag'] =",
"user_list: logging.info(\"looking up for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered",
"count, per tag, for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth()",
"tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if __name__ == \"__main__\": # configs.logging_basic_config()",
"import tweepy import pymongo import json # TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST #",
"import pymongo import json # TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS",
"# logging.info(\"Collecting lang/locale count, per tag, for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) #",
"insert_ids = [] for user in user_list: logging.info(\"looking up for user {0}\".format(user['user'])) user_raw",
"= configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents were deleted from the collection.\".format(clean_collection)) x",
"insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents = user_col.count() if not count_documents",
"# def main(): # logging.info(\"Collecting lang/locale count, per tag, for the given hashtags:",
"user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data = {} user_data = user_data_list user_locale_data = get_user_info(api,",
"mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list = [] insert_ids",
"api_auth = configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS)",
"get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag stored into the",
"logging.info(\"Lang/Locale count per tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if __name__ ==",
"# user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag",
"def get_user_info(api, db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list",
"= user['lang'] user_data_list.append(user_data) user_data = {} user_data = user_data_list user_locale_data = get_user_info(api, db_connection,",
"HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if",
"return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list): # print() # def get_locale_by_tag(api, db_connection, hashtags_list):",
"user_raw_json = user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']}",
"MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE =",
"= configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS =",
"user in user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data)",
"= user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents",
"logging.info(\"Collecting lang/locale count, per tag, for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth",
"configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\")",
"user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data = {} user_data = user_data_list user_locale_data =",
"funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list = [] user_data = {} user_list =",
"MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE))",
"for user in user_list: logging.info(\"looking up for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json",
"tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS,",
"= [] for user in user_list: logging.info(\"looking up for user {0}\".format(user['user'])) user_raw =",
"pymongo import json # TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS",
"user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list = [] insert_ids =",
"in user_list: logging.info(\"looking up for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json",
"WORKING import logging import configs import tweepy import pymongo import json # TWITTER",
"'lang': user['lang'], 'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection,",
"print() #### NOT WORKING # def main(): # logging.info(\"Collecting lang/locale count, per tag,",
"user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api,",
"user['lang'], 'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user,",
"# TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER",
"get_locale_by_tag(api, db_connection, hashtags_list): # print() #### NOT WORKING # def main(): # logging.info(\"Collecting",
"HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS",
"= {} user_data = user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def",
"\\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0}",
"= db_connection[mongo_col] user_data_list = [] user_data = {} user_list = tweet_col.find({}, {'hashtag':1, 'user':1,",
"user in user_list: logging.info(\"looking up for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json =",
"configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col]",
"up for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered = {'hashtag':",
"return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\")",
"user['lang'] user_data_list.append(user_data) user_data = {} user_data = user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets,",
"= [] user_data = {} user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user",
"user['hashtag'] user_data['user'] = user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data = {} user_data =",
"collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list): # print() #",
"api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location':",
"'user':1, 'lang':1}) for user in user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user'] user_data['lang']",
"{'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids)",
"x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user]",
"MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale",
"db_connection[mongo_col] user_data_list = [] user_data = {} user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1})",
"= user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user,",
"get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list = [] user_data = {} user_list = tweet_col.find({},",
"insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents = user_col.count()",
"logging.info(\"entrando na funcao get_user_info\") filtered_user_list = [] insert_ids = [] for user in",
"if not count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection",
"user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col",
"import logging import configs import tweepy import pymongo import json # TWITTER PARAMS",
"not count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection =",
"Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents were deleted from",
"mongodb_connection = configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER,",
"user_data = {} user_data = user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data)",
"def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents = user_col.count() if not",
"{'hashtag':1, 'user':1, 'lang':1}) for user in user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user']",
"# MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE",
"tweet_col = db_connection[mongo_col] user_data_list = [] user_data = {} user_list = tweet_col.find({}, {'hashtag':1,",
"db_connection, hashtags_list): # print() #### NOT WORKING # def main(): # logging.info(\"Collecting lang/locale",
"# tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection,",
"'.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth,",
"get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list =",
"user_col.count() if not count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user))",
"cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents were deleted from the",
"the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() # mongodb_connection = configs.mongodb_connect()",
"= user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data = {} user_data = user_data_list user_locale_data",
"na funcao get_user_info\") filtered_user_list = [] insert_ids = [] for user in user_list:",
"= api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'],",
"lang/locale count, per tag, for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth =",
"PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER",
"= group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE,",
"user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} # x =",
"= configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao",
"MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets,",
"= get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list): user_col =",
"logging import configs import tweepy import pymongo import json # TWITTER PARAMS HASHTAGS_LIST",
"not empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents were",
"0: logging.info(\"Collection \\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection",
"user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name':",
"empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents were deleted",
"is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents",
"user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} # x",
"db_connection[mongo_col_user] count_documents = user_col.count() if not count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is not",
"clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents were deleted from the collection.\".format(clean_collection))",
"# print() # def get_locale_by_tag(api, db_connection, hashtags_list): # print() #### NOT WORKING #",
"user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents =",
"per tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if __name__ == \"__main__\": #",
"db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list = []",
"logging.info(\"Collection \\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup:",
"# insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST)",
"given hashtags: {0}\".format(' '.join(HASHTAGS_LIST))) # api_auth = configs.twitter_auth() # mongodb_connection = configs.mongodb_connect() #",
"{0} documents were deleted from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def",
"# x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col =",
"mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS,",
"MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def",
"user_data = user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection,",
"user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list): # print() # def get_locale_by_tag(api, db_connection,",
"into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if __name__ == \"__main__\": # configs.logging_basic_config() # main()",
"db_connection, hashtags_list): # print() # def get_locale_by_tag(api, db_connection, hashtags_list): # print() #### NOT",
"funcao get_user_info\") filtered_user_list = [] insert_ids = [] for user in user_list: logging.info(\"looking",
"= configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api,",
"logging.info(\"looking up for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered =",
"NOT WORKING import logging import configs import tweepy import pymongo import json #",
"count_documents = user_col.count() if not count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is not empty.",
"user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data =",
"MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count",
"user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user in user_list: user_data['hashtag'] = user['hashtag']",
"for user in user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user'] user_data['lang'] = user['lang']",
"for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered = {'hashtag': user['hashtag'],",
"user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang':",
"HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per",
"= get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth,",
"main(): # logging.info(\"Collecting lang/locale count, per tag, for the given hashtags: {0}\".format(' '.join(HASHTAGS_LIST)))",
"# print() #### NOT WORKING # def main(): # logging.info(\"Collecting lang/locale count, per",
"db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando",
"= db_connection[mongo_col_user] count_documents = user_col.count() if not count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is",
"MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag =",
"tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user in user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] =",
"= user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} #",
"= configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user):",
"user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list)",
"db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents = user_col.count() if not count_documents ==",
"# def get_locale_by_tag(api, db_connection, hashtags_list): # print() #### NOT WORKING # def main():",
"group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST)",
"x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list): # print() # def",
"# logging.info(\"Lang/Locale count per tag stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if __name__",
"return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list): user_col = db_connection[mongo_col_user] count_documents = user_col.count() if",
"logging.info(\"entrando na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list = [] user_data = {}",
"filtered_user_list = [] insert_ids = [] for user in user_list: logging.info(\"looking up for",
"= {'hashtag': user['hashtag'], 'name': user_raw_json['name'], 'lang': user['lang'], 'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list)",
"TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER =",
"MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col",
"in user_list: user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data",
"= [] insert_ids = [] for user in user_list: logging.info(\"looking up for user",
"locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag stored",
"get_user_info(api, db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list =",
"MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na",
"import configs import tweepy import pymongo import json # TWITTER PARAMS HASHTAGS_LIST =",
"user_data = {} user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user in user_list:",
"= tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user in user_list: user_data['hashtag'] = user['hashtag'] user_data['user']",
"= user_col.count() if not count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is not empty. Performing",
"cleanup: {0} documents were deleted from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) #",
"get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user]",
"mongo_col_user) logging.info(\"Collection cleanup: {0} documents were deleted from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list)",
"PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE",
"configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection,",
"configs.cleanup_collection(db_connection, mongo_col_user) logging.info(\"Collection cleanup: {0} documents were deleted from the collection.\".format(clean_collection)) x =",
"import json # TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS =",
"mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) #",
"= configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col =",
"db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list = []",
"{} user_data = user_data_list user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api,",
"documents were deleted from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api,",
"mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) # locale_by_tag",
"count_documents == 0: logging.info(\"Collection \\\"{0}\\\" is not empty. Performing cleanup\".format(mongo_col_user)) clean_collection = configs.cleanup_collection(db_connection,",
"mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list = [] user_data",
"mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list = [] user_data =",
"logging.info(\"Collection cleanup: {0} documents were deleted from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids))",
"insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag = group_tweets_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_TTAGS, HASHTAGS_LIST) #",
"= db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list = [] insert_ids = [] for",
"mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list): user_col = db_connection[mongo_col_user] logging.info(\"entrando na",
"hashtags_list): # print() #### NOT WORKING # def main(): # logging.info(\"Collecting lang/locale count,",
"# locale_by_tag = get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag",
"deleted from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list):",
"db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list = [] insert_ids = [] for user",
"= get_locale_by_tag(api_auth, mongodb_connection, MONGO_COL_TWEETS, MONGO_COL_LOCALE, HASHTAGS_LIST) # logging.info(\"Lang/Locale count per tag stored into",
"configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS MONGO_COL_LOCALE = configs.MONGO_COL_LOCALE def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando",
"user_data['hashtag'] = user['hashtag'] user_data['user'] = user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data = {}",
"user_data['user'] = user['user'] user_data['lang'] = user['lang'] user_data_list.append(user_data) user_data = {} user_data = user_data_list",
"from the collection.\".format(clean_collection)) x = user_col.insert_many(user_locale_list) return(len(x.inserted_ids)) # def group_tweets_by_tag(api, db_connection, hashtags_list): #",
"group_tweets_by_tag(api, db_connection, hashtags_list): # print() # def get_locale_by_tag(api, db_connection, hashtags_list): # print() ####",
"= {} user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for user in user_list: user_data['hashtag']",
"json # TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS",
"def get_user_locale_info(api, db_connection, mongo_col_tweets, mongo_col_user): logging.info(\"entrando na funcao get_user_locale_info\") tweet_col = db_connection[mongo_col] user_data_list",
"def get_locale_by_tag(api, db_connection, hashtags_list): # print() #### NOT WORKING # def main(): #",
"= configs.mongodb_connect() # user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list)",
"configs import tweepy import pymongo import json # TWITTER PARAMS HASHTAGS_LIST = configs.HASHTAGS_LIST",
"[] for user in user_list: logging.info(\"looking up for user {0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user'])",
"'location': user_raw_json['location']} # x = user_col.insert_many(user_locale_list) insert_ids.append(x.insert_ids) return(filtered_user_list) def insert_user_locale_info(api, db_connection, mongo_col_user, user_locale_list):",
"NOT WORKING # def main(): # logging.info(\"Collecting lang/locale count, per tag, for the",
"stored into the collection \\\"{0}\\\"\".format(MONGO_COL_LOCALE)) # if __name__ == \"__main__\": # configs.logging_basic_config() #",
"user_locale_list = get_user_locale_info(api_auth, mongodb_connection, MONGO_COL_TWEETS) # insert_user_locale_info(api_auth, mongodb_connection, MONGO_COL_USER, user_locale_list) # tweets_by_tag =",
"WORKING # def main(): # logging.info(\"Collecting lang/locale count, per tag, for the given",
"configs.HASHTAGS_LIST # MONGODB PARAMS MONGO_COL_TWEETS = configs.MONGO_COL_TWEETS MONGO_COL_USER = configs.MONGO_COL_USER MONGO_COL_TTAGS = configs.MONGO_COL_TTAGS",
"user_col = db_connection[mongo_col_user] logging.info(\"entrando na funcao get_user_info\") filtered_user_list = [] insert_ids = []",
"{0}\".format(user['user'])) user_raw = api.get_user(screen_name=user['user']) user_raw_json = user_raw._json user_filtered = {'hashtag': user['hashtag'], 'name': user_raw_json['name'],",
"#### NOT WORKING # def main(): # logging.info(\"Collecting lang/locale count, per tag, for",
"user_data_list = [] user_data = {} user_list = tweet_col.find({}, {'hashtag':1, 'user':1, 'lang':1}) for",
"user_locale_data = get_user_info(api, db_connection, mongo_col_tweets, user_data) return(user_locale_data) def get_user_info(api, db_connection, mongo_col_user, user_list): user_col"
] |
[
"'auto' # 原文语种 toLang = 'zh' # 译文语种 salt = random.randint(32768, 65536) q",
"json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except Exception as e: print (e) finally: if",
"# coding=utf-8 import http.client import hashlib import urllib import random import json class",
"= secretKey def translate(self,q): myurl = '/api/trans/vip/translate' fromLang = 'auto' # 原文语种 toLang",
"+ '&salt=' + str( salt) + '&sign=' + sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')",
"httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result = json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except Exception",
"appid self.secretKey = secretKey def translate(self,q): myurl = '/api/trans/vip/translate' fromLang = 'auto' #",
"toLang = 'zh' # 译文语种 salt = random.randint(32768, 65536) q = q sign",
"q = q sign = self.appid + q + str(salt) + self.secretKey sign",
"import http.client import hashlib import urllib import random import json class BaiduTranslate: appid",
"__init__(self, appid, secretKey): self.appid = appid self.secretKey = secretKey def translate(self,q): myurl =",
"httpClient.request('GET', myurl) # response是HTTPResponse对象 response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result = json.loads(result_all)",
"appid, secretKey): self.appid = appid self.secretKey = secretKey def translate(self,q): myurl = '/api/trans/vip/translate'",
"http.client import hashlib import urllib import random import json class BaiduTranslate: appid =",
"'&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt='",
"secretKey def translate(self,q): myurl = '/api/trans/vip/translate' fromLang = 'auto' # 原文语种 toLang =",
"self.appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang",
"salt) + '&sign=' + sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象",
"http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result =",
"httpClient = None def __init__(self, appid, secretKey): self.appid = appid self.secretKey = secretKey",
"myurl = myurl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(q) + '&from='",
"+ self.appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' +",
"appid = '' # 填写你的appid secretKey = '' # 填写你的密钥 httpClient = None",
"'&sign=' + sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response =",
"hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(q) +",
"'&to=' + toLang + '&salt=' + str( salt) + '&sign=' + sign try:",
"= None def __init__(self, appid, secretKey): self.appid = appid self.secretKey = secretKey def",
"'' # 填写你的密钥 httpClient = None def __init__(self, appid, secretKey): self.appid = appid",
"urllib import random import json class BaiduTranslate: appid = '' # 填写你的appid secretKey",
"+ urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' +",
"class BaiduTranslate: appid = '' # 填写你的appid secretKey = '' # 填写你的密钥 httpClient",
"+ fromLang + '&to=' + toLang + '&salt=' + str( salt) + '&sign='",
"secretKey): self.appid = appid self.secretKey = secretKey def translate(self,q): myurl = '/api/trans/vip/translate' fromLang",
"+ '&to=' + toLang + '&salt=' + str( salt) + '&sign=' + sign",
"def __init__(self, appid, secretKey): self.appid = appid self.secretKey = secretKey def translate(self,q): myurl",
"填写你的appid secretKey = '' # 填写你的密钥 httpClient = None def __init__(self, appid, secretKey):",
"+ '&from=' + fromLang + '&to=' + toLang + '&salt=' + str( salt)",
"response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result = json.loads(result_all) print (result) return result['trans_result'][0]['dst']",
"= response.read().decode(\"utf-8\") result = json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except Exception as e:",
"response是HTTPResponse对象 response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result = json.loads(result_all) print (result) return",
"q + str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid='",
"import urllib import random import json class BaiduTranslate: appid = '' # 填写你的appid",
"sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response = httpClient.getresponse() result_all",
"# 原文语种 toLang = 'zh' # 译文语种 salt = random.randint(32768, 65536) q =",
"'&salt=' + str( salt) + '&sign=' + sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET',",
"hashlib import urllib import random import json class BaiduTranslate: appid = '' #",
"= appid self.secretKey = secretKey def translate(self,q): myurl = '/api/trans/vip/translate' fromLang = 'auto'",
"response.read().decode(\"utf-8\") result = json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except Exception as e: print",
"= json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except Exception as e: print (e) finally:",
"sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid=' + self.appid + '&q=' +",
"填写你的密钥 httpClient = None def __init__(self, appid, secretKey): self.appid = appid self.secretKey =",
"= '' # 填写你的appid secretKey = '' # 填写你的密钥 httpClient = None def",
"self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid=' + self.appid + '&q='",
"+ toLang + '&salt=' + str( salt) + '&sign=' + sign try: httpClient",
"coding=utf-8 import http.client import hashlib import urllib import random import json class BaiduTranslate:",
"str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid=' + self.appid",
"myurl = '/api/trans/vip/translate' fromLang = 'auto' # 原文语种 toLang = 'zh' # 译文语种",
"toLang + '&salt=' + str( salt) + '&sign=' + sign try: httpClient =",
"random.randint(32768, 65536) q = q sign = self.appid + q + str(salt) +",
"# response是HTTPResponse对象 response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result = json.loads(result_all) print (result)",
"= http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result",
"result = json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except Exception as e: print (e)",
"+ sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response = httpClient.getresponse()",
"secretKey = '' # 填写你的密钥 httpClient = None def __init__(self, appid, secretKey): self.appid",
"self.appid + q + str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl",
"import hashlib import urllib import random import json class BaiduTranslate: appid = ''",
"= hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(q)",
"self.appid = appid self.secretKey = secretKey def translate(self,q): myurl = '/api/trans/vip/translate' fromLang =",
"print (result) return result['trans_result'][0]['dst'] except Exception as e: print (e) finally: if httpClient:",
"65536) q = q sign = self.appid + q + str(salt) + self.secretKey",
"httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\")",
"= q sign = self.appid + q + str(salt) + self.secretKey sign =",
"myurl) # response是HTTPResponse对象 response = httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result = json.loads(result_all) print",
"json class BaiduTranslate: appid = '' # 填写你的appid secretKey = '' # 填写你的密钥",
"= 'auto' # 原文语种 toLang = 'zh' # 译文语种 salt = random.randint(32768, 65536)",
"+ q + str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl +",
"= httpClient.getresponse() result_all = response.read().decode(\"utf-8\") result = json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except",
"= '' # 填写你的密钥 httpClient = None def __init__(self, appid, secretKey): self.appid =",
"# 填写你的密钥 httpClient = None def __init__(self, appid, secretKey): self.appid = appid self.secretKey",
"'zh' # 译文语种 salt = random.randint(32768, 65536) q = q sign = self.appid",
"(result) return result['trans_result'][0]['dst'] except Exception as e: print (e) finally: if httpClient: httpClient.close()",
"= '/api/trans/vip/translate' fromLang = 'auto' # 原文语种 toLang = 'zh' # 译文语种 salt",
"+ '&sign=' + sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response",
"= random.randint(32768, 65536) q = q sign = self.appid + q + str(salt)",
"# 填写你的appid secretKey = '' # 填写你的密钥 httpClient = None def __init__(self, appid,",
"= self.appid + q + str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl =",
"fromLang + '&to=' + toLang + '&salt=' + str( salt) + '&sign=' +",
"译文语种 salt = random.randint(32768, 65536) q = q sign = self.appid + q",
"random import json class BaiduTranslate: appid = '' # 填写你的appid secretKey = ''",
"= 'zh' # 译文语种 salt = random.randint(32768, 65536) q = q sign =",
"def translate(self,q): myurl = '/api/trans/vip/translate' fromLang = 'auto' # 原文语种 toLang = 'zh'",
"result_all = response.read().decode(\"utf-8\") result = json.loads(result_all) print (result) return result['trans_result'][0]['dst'] except Exception as",
"'?appid=' + self.appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to='",
"import random import json class BaiduTranslate: appid = '' # 填写你的appid secretKey =",
"'' # 填写你的appid secretKey = '' # 填写你的密钥 httpClient = None def __init__(self,",
"myurl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang",
"str( salt) + '&sign=' + sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) #",
"sign = self.appid + q + str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl",
"+ self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid=' + self.appid +",
"translate(self,q): myurl = '/api/trans/vip/translate' fromLang = 'auto' # 原文语种 toLang = 'zh' #",
"import json class BaiduTranslate: appid = '' # 填写你的appid secretKey = '' #",
"# 译文语种 salt = random.randint(32768, 65536) q = q sign = self.appid +",
"urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(",
"'&from=' + fromLang + '&to=' + toLang + '&salt=' + str( salt) +",
"fromLang = 'auto' # 原文语种 toLang = 'zh' # 译文语种 salt = random.randint(32768,",
"'/api/trans/vip/translate' fromLang = 'auto' # 原文语种 toLang = 'zh' # 译文语种 salt =",
"+ str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest() myurl = myurl + '?appid=' +",
"try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl) # response是HTTPResponse对象 response = httpClient.getresponse() result_all =",
"salt = random.randint(32768, 65536) q = q sign = self.appid + q +",
"BaiduTranslate: appid = '' # 填写你的appid secretKey = '' # 填写你的密钥 httpClient =",
"= myurl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(q) + '&from=' +",
"+ '&q=' + urllib.parse.quote(q) + '&from=' + fromLang + '&to=' + toLang +",
"原文语种 toLang = 'zh' # 译文语种 salt = random.randint(32768, 65536) q = q",
"+ '?appid=' + self.appid + '&q=' + urllib.parse.quote(q) + '&from=' + fromLang +",
"+ str( salt) + '&sign=' + sign try: httpClient = http.client.HTTPConnection('api.fanyi.baidu.com') httpClient.request('GET', myurl)",
"None def __init__(self, appid, secretKey): self.appid = appid self.secretKey = secretKey def translate(self,q):",
"self.secretKey = secretKey def translate(self,q): myurl = '/api/trans/vip/translate' fromLang = 'auto' # 原文语种",
"q sign = self.appid + q + str(salt) + self.secretKey sign = hashlib.md5(sign.encode()).hexdigest()"
] |
[
"serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single quote from",
"Quote from .serializers import QuoteSerializer from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for",
"quote from an application. Endpoint is `/api/<id>` ``GET``: retrieve a single quote ``PUT``:",
"retrieve all quotes ``POST``: creates a new quote \"\"\" queryset: List[Quote] = Quote.objects.all()",
"quotes from an application. Endpoint is `/api/` ``GET``: retrieve all quotes ``POST``: creates",
"import List, Type from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import",
"deletes a single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset: List[Quote] = Quote.objects.all() serializer_class:",
"`/api/<id>` ``GET``: retrieve a single quote ``PUT``: updates a single quote ``DELETE``: deletes",
"QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single quote from an application. Endpoint is `/api/<id>`",
"typing import List, Type from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models",
"queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving",
"a single quote ``DELETE``: deletes a single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset:",
"for retrieving a single quote from an application. Endpoint is `/api/<id>` ``GET``: retrieve",
"single quote ``PUT``: updates a single quote ``DELETE``: deletes a single quote \"\"\"",
"an application. Endpoint is `/api/` ``GET``: retrieve all quotes ``POST``: creates a new",
"is `/api/` ``GET``: retrieve all quotes ``POST``: creates a new quote \"\"\" queryset:",
"Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single quote",
"QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single quote from an application. Endpoint",
"\"\"\"Module represents API for routes.\"\"\" from typing import List, Type from rest_framework.generics import",
"is `/api/<id>` ``GET``: retrieve a single quote ``PUT``: updates a single quote ``DELETE``:",
"a single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer]",
"updates a single quote ``DELETE``: deletes a single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,)",
"( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import Quote from .serializers import QuoteSerializer from",
"``PUT``: updates a single quote ``DELETE``: deletes a single quote \"\"\" permission_classes =",
"a new quote \"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class",
"from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes from an",
"<reponame>vyahello/quotes \"\"\"Module represents API for routes.\"\"\" from typing import List, Type from rest_framework.generics",
"single quote ``DELETE``: deletes a single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset: List[Quote]",
".permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes from an application.",
"from typing import List, Type from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from",
"Endpoint is `/api/<id>` ``GET``: retrieve a single quote ``PUT``: updates a single quote",
"from app.models import Quote from .serializers import QuoteSerializer from .permissions import IsOwnerOrReadOnly class",
"for routes.\"\"\" from typing import List, Type from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView,",
"API for routes.\"\"\" from typing import List, Type from rest_framework.generics import ( ListCreateAPIView,",
"for retrieving all quotes from an application. Endpoint is `/api/` ``GET``: retrieve all",
"= Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single",
"``GET``: retrieve a single quote ``PUT``: updates a single quote ``DELETE``: deletes a",
"quote ``PUT``: updates a single quote ``DELETE``: deletes a single quote \"\"\" permission_classes",
"import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes from an application. Endpoint",
") from app.models import Quote from .serializers import QuoteSerializer from .permissions import IsOwnerOrReadOnly",
"RetrieveUpdateDestroyAPIView, ) from app.models import Quote from .serializers import QuoteSerializer from .permissions import",
"class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes from an application. Endpoint is `/api/`",
"a single quote ``PUT``: updates a single quote ``DELETE``: deletes a single quote",
"from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import Quote from .serializers",
"import QuoteSerializer from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes",
"from an application. Endpoint is `/api/<id>` ``GET``: retrieve a single quote ``PUT``: updates",
"\"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for",
"rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import Quote from .serializers import",
"quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer",
"Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single quote from an",
"= QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single quote from an application.",
"Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes from an application. Endpoint is `/api/` ``GET``:",
"all quotes from an application. Endpoint is `/api/` ``GET``: retrieve all quotes ``POST``:",
"`/api/` ``GET``: retrieve all quotes ``POST``: creates a new quote \"\"\" queryset: List[Quote]",
"\"\"\"Responsible for retrieving a single quote from an application. Endpoint is `/api/<id>` ``GET``:",
"List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a",
"quote ``DELETE``: deletes a single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset: List[Quote] =",
"``DELETE``: deletes a single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset: List[Quote] = Quote.objects.all()",
"creates a new quote \"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer",
"new quote \"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView):",
"\"\"\"Responsible for retrieving all quotes from an application. Endpoint is `/api/` ``GET``: retrieve",
"from .serializers import QuoteSerializer from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving",
"retrieving a single quote from an application. Endpoint is `/api/<id>` ``GET``: retrieve a",
"retrieving all quotes from an application. Endpoint is `/api/` ``GET``: retrieve all quotes",
"an application. Endpoint is `/api/<id>` ``GET``: retrieve a single quote ``PUT``: updates a",
"ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import Quote from .serializers import QuoteSerializer from .permissions",
".serializers import QuoteSerializer from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all",
"import Quote from .serializers import QuoteSerializer from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible",
"Endpoint is `/api/` ``GET``: retrieve all quotes ``POST``: creates a new quote \"\"\"",
"quote \"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] = QuoteSerializer class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible",
"class QuoteDetail(RetrieveUpdateDestroyAPIView): \"\"\"Responsible for retrieving a single quote from an application. Endpoint is",
"all quotes ``POST``: creates a new quote \"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class:",
"single quote \"\"\" permission_classes = (IsOwnerOrReadOnly,) queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] =",
"application. Endpoint is `/api/` ``GET``: retrieve all quotes ``POST``: creates a new quote",
"List, Type from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import Quote",
"QuoteSerializer from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes from",
"application. Endpoint is `/api/<id>` ``GET``: retrieve a single quote ``PUT``: updates a single",
"IsOwnerOrReadOnly class Quotes(ListCreateAPIView): \"\"\"Responsible for retrieving all quotes from an application. Endpoint is",
"``POST``: creates a new quote \"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer] =",
"a single quote from an application. Endpoint is `/api/<id>` ``GET``: retrieve a single",
"retrieve a single quote ``PUT``: updates a single quote ``DELETE``: deletes a single",
"Type from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import Quote from",
"from an application. Endpoint is `/api/` ``GET``: retrieve all quotes ``POST``: creates a",
"single quote from an application. Endpoint is `/api/<id>` ``GET``: retrieve a single quote",
"import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, ) from app.models import Quote from .serializers import QuoteSerializer",
"routes.\"\"\" from typing import List, Type from rest_framework.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, )",
"quotes ``POST``: creates a new quote \"\"\" queryset: List[Quote] = Quote.objects.all() serializer_class: Type[QuoteSerializer]",
"app.models import Quote from .serializers import QuoteSerializer from .permissions import IsOwnerOrReadOnly class Quotes(ListCreateAPIView):",
"``GET``: retrieve all quotes ``POST``: creates a new quote \"\"\" queryset: List[Quote] =",
"represents API for routes.\"\"\" from typing import List, Type from rest_framework.generics import ("
] |
[
"from exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils import get_element,",
"retrieved.') self._contents_json = resp.json()['data'] return self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias",
"refresh=False): if not refresh and self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias",
"publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp = requests.post( edit_uri, headers={'In-Progress': 'false'},",
"'DEACCESSIONED' def get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry')",
"!= 200: raise ConnectionError('Atom entry could not be retrieved.') self._contents_json = resp.json()['data'] return",
"description.') if get_element(dataset._entry, 'creator', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have an",
").text) def get_contents(self, refresh=False): if not refresh and self._contents_json: return self._contents_json content_uri =",
"a title.') if get_element(dataset._entry, 'description', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have",
"entry could not be retrieved.') self._contents_json = resp.json()['data'] return self._contents_json def publish(self): edit_uri",
"if resp.status_code != 200: raise ConnectionError('Atom entry could not be retrieved.') self._contents_json =",
"'true' @property def alias(self): return self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element( self.collection, namespace='atom',",
"'dcterms') is None: raise InsufficientMetadataError('This dataset must have a description.') if get_element(dataset._entry, 'creator',",
"resp.status_code != 201: raise OperationFailedError('This dataset could not be added.') dataset.dataverse = self",
"self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp = requests.get( content_uri, params={'key': self.connection.token}",
"'DELETED' or dataset._state == 'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if",
"self.connection.token} ) if resp.status_code != 200: raise ConnectionError('Atom entry could not be retrieved.')",
"doi): return next((s for s in self.get_datasets() if s.doi == doi), None) def",
"def get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry') return",
"auth=self.connection.auth, ) if resp.status_code != 200: raise OperationFailedError('The Dataverse could not be published.')",
"'creator', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have an author.') resp =",
"have a title.') if get_element(dataset._entry, 'description', 'dcterms') is None: raise InsufficientMetadataError('This dataset must",
"MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils import get_element, get_elements, sanitize class Dataverse(object): def",
"return next((s for s in self.get_datasets() if s.doi == doi), None) def get_dataset_by_title(self,",
"s.title == title), None) def get_dataset_by_string_in_entry(self, string): return next((s for s in self.get_datasets()",
"information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info =",
"refresh and self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp =",
"GUI. For more information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def",
"def alias(self): return self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text)",
"__init__(self, connection, collection): self.connection = connection self.collection = collection self._contents_json = None @property",
"raise OperationFailedError('The Dataverse could not be published.') def add_dataset(self, dataset): if get_element(dataset._entry, 'title',",
"get_dataset_by_doi(self, doi): return next((s for s in self.get_datasets() if s.doi == doi), None)",
"is None: raise InsufficientMetadataError('This dataset must have a description.') if get_element(dataset._entry, 'creator', 'dcterms')",
") resp = requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code != 200:",
"status_tag.text return status.lower() == 'true' @property def alias(self): return self.collection.get('href').split('/')[-1] @property def title(self):",
"raise ConnectionError('Atom entry could not be retrieved.') self._contents_json = resp.json()['data'] return self._contents_json def",
"entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for entry in entries] def get_dataset_by_doi(self,",
"None: raise InsufficientMetadataError('This dataset must have a description.') if get_element(dataset._entry, 'creator', 'dcterms') is",
"in self.get_datasets() if s.title == title), None) def get_dataset_by_string_in_entry(self, string): return next((s for",
"get_element(dataset._entry, 'title', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a title.') if",
"dataset): if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED': return resp = requests.delete(",
"requests.get( content_uri, params={'key': self.connection.token} ) if resp.status_code != 200: raise ConnectionError('Atom entry could",
"if get_element(dataset._entry, 'title', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a title.')",
"content_uri, params={'key': self.connection.token} ) if resp.status_code != 200: raise ConnectionError('Atom entry could not",
"'description', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a description.') if get_element(dataset._entry,",
"= connection self.collection = collection self._contents_json = None @property def is_published(self): collection_info =",
"requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status =",
"== 'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code == 405:",
"= None @property def is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag =",
"= 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp = requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, )",
"published.') def add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms') is None: raise InsufficientMetadataError('This dataset",
"return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp = requests.get( content_uri, params={'key':",
"'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp = requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if",
"have a description.') if get_element(dataset._entry, 'creator', 'dcterms') is None: raise InsufficientMetadataError('This dataset must",
"resp.status_code != 200: raise ConnectionError('Atom entry could not be retrieved.') self._contents_json = resp.json()['data']",
"in self.get_datasets() if s.doi == doi), None) def get_dataset_by_title(self, title): return next((s for",
") if resp.status_code != 201: raise OperationFailedError('This dataset could not be added.') dataset.dataverse",
"= self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state == 'DELETED' or dataset._state ==",
") if resp.status_code == 405: raise MethodNotAllowedError('Published datasets can only be ' 'deleted",
"collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", )",
"self) for entry in entries] def get_dataset_by_doi(self, doi): return next((s for s in",
"s in self.get_datasets() if s.doi == doi), None) def get_dataset_by_title(self, title): return next((s",
"== 'true' @property def alias(self): return self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element( self.collection,",
"dataset must have an author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth,",
"not be retrieved.') self._contents_json = resp.json()['data'] return self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format(",
"!= 201: raise OperationFailedError('This dataset could not be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content)",
"self.connection.host, self.alias ) resp = requests.get( content_uri, params={'key': self.connection.token} ) if resp.status_code !=",
"= 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp = requests.get( content_uri, params={'key': self.connection.token} ) if",
"def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp = requests.post( edit_uri, headers={'In-Progress':",
"get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry,",
"dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth,",
"for s in self.get_datasets() if s.doi == doi), None) def get_dataset_by_title(self, title): return",
"from dataset import Dataset from exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError )",
"'dcterms') is None: raise InsufficientMetadataError('This dataset must have an author.') resp = requests.post(",
"sanitize class Dataverse(object): def __init__(self, connection, collection): self.connection = connection self.collection = collection",
"OperationFailedError('The Dataverse could not be published.') def add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms')",
"!= 200: raise OperationFailedError('The Dataverse could not be published.') def add_dataset(self, dataset): if",
"or dataset._state == 'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code",
"raise OperationFailedError('This dataset could not be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self,",
"is None: raise InsufficientMetadataError('This dataset must have an author.') resp = requests.post( self.collection.get('href'),",
"None: raise InsufficientMetadataError('This dataset must have a title.') if get_element(dataset._entry, 'description', 'dcterms') is",
"have an author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if",
"requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code != 200: raise OperationFailedError('The Dataverse",
"'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code != 201: raise OperationFailedError('This dataset could not be",
"def title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def get_contents(self, refresh=False): if not",
"InsufficientMetadataError('This dataset must have a title.') if get_element(dataset._entry, 'description', 'dcterms') is None: raise",
"= status_tag.text return status.lower() == 'true' @property def alias(self): return self.collection.get('href').split('/')[-1] @property def",
"self.alias ) resp = requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code !=",
"MethodNotAllowedError('Published datasets can only be ' 'deleted from the GUI. For more information,",
"if get_element(dataset._entry, 'creator', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have an author.')",
"if resp.status_code != 201: raise OperationFailedError('This dataset could not be added.') dataset.dataverse =",
"sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def get_contents(self, refresh=False): if not refresh and self._contents_json:",
"is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\",",
"self._contents_json = resp.json()['data'] return self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias )",
"self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED':",
"= requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for",
"def get_dataset_by_title(self, title): return next((s for s in self.get_datasets() if s.title == title),",
"for s in self.get_datasets() if s.title == title), None) def get_dataset_by_string_in_entry(self, string): return",
"@property def is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element( collection_info,",
"resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code == 405: raise MethodNotAllowedError('Published datasets",
"self.get_datasets() if s.title == title), None) def get_dataset_by_string_in_entry(self, string): return next((s for s",
"self._contents_json = None @property def is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag",
"resp = requests.get( content_uri, params={'key': self.connection.token} ) if resp.status_code != 200: raise ConnectionError('Atom",
"not be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state ==",
"def add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms') is None: raise InsufficientMetadataError('This dataset must",
"if resp.status_code == 405: raise MethodNotAllowedError('Published datasets can only be ' 'deleted from",
"more information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info",
"if s.title == title), None) def get_dataset_by_string_in_entry(self, string): return next((s for s in",
"def get_contents(self, refresh=False): if not refresh and self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format(",
"405: raise MethodNotAllowedError('Published datasets can only be ' 'deleted from the GUI. For",
"status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return status.lower() ==",
"import get_element, get_elements, sanitize class Dataverse(object): def __init__(self, connection, collection): self.connection = connection",
"class Dataverse(object): def __init__(self, connection, collection): self.connection = connection self.collection = collection self._contents_json",
"return status.lower() == 'true' @property def alias(self): return self.collection.get('href').split('/')[-1] @property def title(self): return",
"auth=self.connection.auth, ) if resp.status_code != 201: raise OperationFailedError('This dataset could not be added.')",
"get_contents(self, refresh=False): if not refresh and self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host,",
"author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code !=",
"s in self.get_datasets() if s.title == title), None) def get_dataset_by_string_in_entry(self, string): return next((s",
"utils import get_element, get_elements, sanitize class Dataverse(object): def __init__(self, connection, collection): self.connection =",
"auth=self.connection.auth, ).content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return",
"please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info = requests.get(",
"entry in entries] def get_dataset_by_doi(self, doi): return next((s for s in self.get_datasets() if",
"dataset could not be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if",
"the GUI. For more information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED'",
") if resp.status_code != 200: raise OperationFailedError('The Dataverse could not be published.') def",
"could not be published.') def add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms') is None:",
"s.doi == doi), None) def get_dataset_by_title(self, title): return next((s for s in self.get_datasets()",
"dataset._state == 'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code ==",
"to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth,",
"'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code == 405: raise",
"must have a title.') if get_element(dataset._entry, 'description', 'dcterms') is None: raise InsufficientMetadataError('This dataset",
"is None: raise InsufficientMetadataError('This dataset must have a title.') if get_element(dataset._entry, 'description', 'dcterms')",
"from the GUI. For more information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state =",
"self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for entry in",
"= get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for entry in entries] def get_dataset_by_doi(self, doi):",
"'false'}, auth=self.connection.auth, ) if resp.status_code != 200: raise OperationFailedError('The Dataverse could not be",
"= requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code != 200: raise OperationFailedError('The",
"self.get_datasets() if s.doi == doi), None) def get_dataset_by_title(self, title): return next((s for s",
"dataset): if get_element(dataset._entry, 'title', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a",
"( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils import get_element, get_elements, sanitize class",
"None: raise InsufficientMetadataError('This dataset must have an author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(),",
"get_element, get_elements, sanitize class Dataverse(object): def __init__(self, connection, collection): self.connection = connection self.collection",
"must have an author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, )",
"dataset must have a title.') if get_element(dataset._entry, 'description', 'dcterms') is None: raise InsufficientMetadataError('This",
"dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries =",
"[Dataset.from_dataverse(entry, self) for entry in entries] def get_dataset_by_doi(self, doi): return next((s for s",
"get_dataset_by_string_in_entry(self, string): return next((s for s in self.get_datasets() if string in s.get_entry()), None)",
"= requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code == 405: raise MethodNotAllowedError('Published datasets can",
"def get_dataset_by_string_in_entry(self, string): return next((s for s in self.get_datasets() if string in s.get_entry()),",
"if resp.status_code != 200: raise OperationFailedError('The Dataverse could not be published.') def add_dataset(self,",
"resp = requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code != 200: raise",
"from utils import get_element, get_elements, sanitize class Dataverse(object): def __init__(self, connection, collection): self.connection",
"self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def get_contents(self, refresh=False):",
"get_element(dataset._entry, 'description', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a description.') if",
"resp.status_code != 200: raise OperationFailedError('The Dataverse could not be published.') def add_dataset(self, dataset):",
"raise InsufficientMetadataError('This dataset must have a description.') if get_element(dataset._entry, 'creator', 'dcterms') is None:",
"could not be retrieved.') self._contents_json = resp.json()['data'] return self._contents_json def publish(self): edit_uri =",
"resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code != 201:",
"collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self)",
"for entry in entries] def get_dataset_by_doi(self, doi): return next((s for s in self.get_datasets()",
"add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have",
") if resp.status_code != 200: raise ConnectionError('Atom entry could not be retrieved.') self._contents_json",
"= requests.get( content_uri, params={'key': self.connection.token} ) if resp.status_code != 200: raise ConnectionError('Atom entry",
"a description.') if get_element(dataset._entry, 'creator', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have",
").content entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for entry in entries] def",
"next((s for s in self.get_datasets() if s.doi == doi), None) def get_dataset_by_title(self, title):",
"title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def get_contents(self, refresh=False): if not refresh",
"edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp = requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth,",
"201: raise OperationFailedError('This dataset could not be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def",
"alias(self): return self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def",
"dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED': return",
"return [Dataset.from_dataverse(entry, self) for entry in entries] def get_dataset_by_doi(self, doi): return next((s for",
"return next((s for s in self.get_datasets() if s.title == title), None) def get_dataset_by_string_in_entry(self,",
"not refresh and self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp",
"be retrieved.') self._contents_json = resp.json()['data'] return self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host,",
"dataset must have a description.') if get_element(dataset._entry, 'creator', 'dcterms') is None: raise InsufficientMetadataError('This",
"requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code == 405: raise MethodNotAllowedError('Published datasets can only",
"if get_element(dataset._entry, 'description', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a description.')",
"Dataset from exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils import",
") resp = requests.get( content_uri, params={'key': self.connection.token} ) if resp.status_code != 200: raise",
"dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code == 405: raise MethodNotAllowedError('Published datasets can only be",
"exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils import get_element, get_elements,",
"status = status_tag.text return status.lower() == 'true' @property def alias(self): return self.collection.get('href').split('/')[-1] @property",
"def delete_dataset(self, dataset): if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED': return resp",
"' 'deleted from the GUI. For more information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778')",
") from utils import get_element, get_elements, sanitize class Dataverse(object): def __init__(self, connection, collection):",
"if s.doi == doi), None) def get_dataset_by_title(self, title): return next((s for s in",
"raise InsufficientMetadataError('This dataset must have an author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type':",
"collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return status.lower() == 'true' @property def",
"tag='title', ).text) def get_contents(self, refresh=False): if not refresh and self._contents_json: return self._contents_json content_uri",
"get_element(dataset._entry, 'creator', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have an author.') resp",
"'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries",
"== doi), None) def get_dataset_by_title(self, title): return next((s for s in self.get_datasets() if",
"an author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code",
"title.') if get_element(dataset._entry, 'description', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a",
"== 'DELETED' or dataset._state == 'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, )",
"def is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\",",
"return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def get_contents(self, refresh=False): if not refresh and",
"if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED': return resp = requests.delete( dataset.edit_uri,",
") status = status_tag.text return status.lower() == 'true' @property def alias(self): return self.collection.get('href').split('/')[-1]",
"self.connection = connection self.collection = collection self._contents_json = None @property def is_published(self): collection_info",
"InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils import get_element, get_elements, sanitize class Dataverse(object):",
"return resp = requests.delete( dataset.edit_uri, auth=self.connection.auth, ) if resp.status_code == 405: raise MethodNotAllowedError('Published",
"can only be ' 'deleted from the GUI. For more information, please refer",
"requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code != 201: raise OperationFailedError('This",
"@property def title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def get_contents(self, refresh=False): if",
"= collection self._contents_json = None @property def is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth,",
"InsufficientMetadataError('This dataset must have a description.') if get_element(dataset._entry, 'creator', 'dcterms') is None: raise",
"must have a description.') if get_element(dataset._entry, 'creator', 'dcterms') is None: raise InsufficientMetadataError('This dataset",
"'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp = requests.get( content_uri, params={'key': self.connection.token} ) if resp.status_code",
"added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state == 'DELETED' or",
"None @property def is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element(",
"refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info = requests.get( self.collection.get('href'),",
"import Dataset from exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils",
"self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp = requests.get( content_uri,",
"self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp = requests.post( edit_uri,",
"return self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp = requests.post(",
"ConnectionError ) from utils import get_element, get_elements, sanitize class Dataverse(object): def __init__(self, connection,",
"headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code != 200: raise OperationFailedError('The Dataverse could not",
"= get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return status.lower() == 'true'",
"== 405: raise MethodNotAllowedError('Published datasets can only be ' 'deleted from the GUI.",
"'dcterms') is None: raise InsufficientMetadataError('This dataset must have a title.') if get_element(dataset._entry, 'description',",
"raise MethodNotAllowedError('Published datasets can only be ' 'deleted from the GUI. For more",
"get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return status.lower() == 'true' @property",
"requests from dataset import Dataset from exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError",
"content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp = requests.get( content_uri, params={'key': self.connection.token} )",
"200: raise OperationFailedError('The Dataverse could not be published.') def add_dataset(self, dataset): if get_element(dataset._entry,",
"Dataverse could not be published.') def add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms') is",
"auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for entry in entries]",
"tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return status.lower() == 'true' @property def alias(self): return",
"get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for entry in entries] def get_dataset_by_doi(self, doi): return",
"only be ' 'deleted from the GUI. For more information, please refer to",
"def get_dataset_by_doi(self, doi): return next((s for s in self.get_datasets() if s.doi == doi),",
"None) def get_dataset_by_title(self, title): return next((s for s in self.get_datasets() if s.title ==",
"edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code != 200: raise OperationFailedError('The Dataverse could",
"title): return next((s for s in self.get_datasets() if s.title == title), None) def",
"self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text",
"self.collection, namespace='atom', tag='title', ).text) def get_contents(self, refresh=False): if not refresh and self._contents_json: return",
"ConnectionError('Atom entry could not be retrieved.') self._contents_json = resp.json()['data'] return self._contents_json def publish(self):",
"status.lower() == 'true' @property def alias(self): return self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element(",
"get_elements, sanitize class Dataverse(object): def __init__(self, connection, collection): self.connection = connection self.collection =",
"auth=self.connection.auth, ) if resp.status_code == 405: raise MethodNotAllowedError('Published datasets can only be '",
"dataset import Dataset from exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from",
"data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code != 201: raise OperationFailedError('This dataset could",
"= requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code != 201: raise",
"requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info, tag='entry') return [Dataset.from_dataverse(entry, self) for entry",
"entries] def get_dataset_by_doi(self, doi): return next((s for s in self.get_datasets() if s.doi ==",
"in entries] def get_dataset_by_doi(self, doi): return next((s for s in self.get_datasets() if s.doi",
"= resp.json()['data'] return self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp",
"' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content",
"== title), None) def get_dataset_by_string_in_entry(self, string): return next((s for s in self.get_datasets() if",
"be ' 'deleted from the GUI. For more information, please refer to '",
"OperationFailedError, ConnectionError ) from utils import get_element, get_elements, sanitize class Dataverse(object): def __init__(self,",
"get_dataset_by_title(self, title): return next((s for s in self.get_datasets() if s.title == title), None)",
"could not be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state",
"delete_dataset(self, dataset): if dataset._state == 'DELETED' or dataset._state == 'DEACCESSIONED': return resp =",
"be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state == 'DELETED'",
"@property def alias(self): return self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title',",
"self.alias ) resp = requests.get( content_uri, params={'key': self.connection.token} ) if resp.status_code != 200:",
"connection self.collection = collection self._contents_json = None @property def is_published(self): collection_info = requests.get(",
").content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return status.lower()",
"params={'key': self.connection.token} ) if resp.status_code != 200: raise ConnectionError('Atom entry could not be",
"self.collection = collection self._contents_json = None @property def is_published(self): collection_info = requests.get( self.collection.get('href'),",
"be published.') def add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms') is None: raise InsufficientMetadataError('This",
"headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code != 201: raise OperationFailedError('This dataset could not",
"doi), None) def get_dataset_by_title(self, title): return next((s for s in self.get_datasets() if s.title",
"not be published.') def add_dataset(self, dataset): if get_element(dataset._entry, 'title', 'dcterms') is None: raise",
"next((s for s in self.get_datasets() if s.title == title), None) def get_dataset_by_string_in_entry(self, string):",
"return self.collection.get('href').split('/')[-1] @property def title(self): return sanitize(get_element( self.collection, namespace='atom', tag='title', ).text) def get_contents(self,",
"Dataverse(object): def __init__(self, connection, collection): self.connection = connection self.collection = collection self._contents_json =",
"'deleted from the GUI. For more information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state",
"namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status = status_tag.text return status.lower() == 'true' @property def alias(self):",
"dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset): if dataset._state == 'DELETED' or dataset._state",
"For more information, please refer to ' 'https://github.com/IQSS/dataverse/issues/778') dataset._state = 'DEACCESSIONED' def get_datasets(self):",
"import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError, ConnectionError ) from utils import get_element, get_elements, sanitize",
"InsufficientMetadataError('This dataset must have an author.') resp = requests.post( self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'},",
"resp.status_code == 405: raise MethodNotAllowedError('Published datasets can only be ' 'deleted from the",
"collection self._contents_json = None @property def is_published(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content",
"None) def get_dataset_by_string_in_entry(self, string): return next((s for s in self.get_datasets() if string in",
"'title', 'dcterms') is None: raise InsufficientMetadataError('This dataset must have a title.') if get_element(dataset._entry,",
"= requests.get( self.collection.get('href'), auth=self.connection.auth, ).content status_tag = get_element( collection_info, namespace=\"http://purl.org/net/sword/terms/state\", tag=\"dataverseHasBeenReleased\", ) status",
"self.connection.host, self.alias ) resp = requests.post( edit_uri, headers={'In-Progress': 'false'}, auth=self.connection.auth, ) if resp.status_code",
"OperationFailedError('This dataset could not be added.') dataset.dataverse = self dataset._refresh(receipt=resp.content) def delete_dataset(self, dataset):",
"def __init__(self, connection, collection): self.connection = connection self.collection = collection self._contents_json = None",
"= 'DEACCESSIONED' def get_datasets(self): collection_info = requests.get( self.collection.get('href'), auth=self.connection.auth, ).content entries = get_elements(collection_info,",
"import requests from dataset import Dataset from exceptions import ( InsufficientMetadataError, MethodNotAllowedError, OperationFailedError,",
"raise InsufficientMetadataError('This dataset must have a title.') if get_element(dataset._entry, 'description', 'dcterms') is None:",
"title), None) def get_dataset_by_string_in_entry(self, string): return next((s for s in self.get_datasets() if string",
"connection, collection): self.connection = connection self.collection = collection self._contents_json = None @property def",
"and self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias ) resp = requests.get(",
"resp.json()['data'] return self._contents_json def publish(self): edit_uri = 'https://{0}/dvn/api/data-deposit/v1.1/swordv2/edit/dataverse/{1}'.format( self.connection.host, self.alias ) resp =",
"datasets can only be ' 'deleted from the GUI. For more information, please",
"self.collection.get('href'), data=dataset.get_entry(), headers={'Content-type': 'application/atom+xml'}, auth=self.connection.auth, ) if resp.status_code != 201: raise OperationFailedError('This dataset",
"tag='entry') return [Dataset.from_dataverse(entry, self) for entry in entries] def get_dataset_by_doi(self, doi): return next((s",
"200: raise ConnectionError('Atom entry could not be retrieved.') self._contents_json = resp.json()['data'] return self._contents_json",
"namespace='atom', tag='title', ).text) def get_contents(self, refresh=False): if not refresh and self._contents_json: return self._contents_json",
"collection): self.connection = connection self.collection = collection self._contents_json = None @property def is_published(self):",
"if not refresh and self._contents_json: return self._contents_json content_uri = 'https://{0}/api/dataverses/{1}/contents'.format( self.connection.host, self.alias )"
] |
[
"License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"so p1 is not up to date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt",
"{ \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, }, ]",
"await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert len(commits) ==",
"Unless required by applicable law or agreed to in writing, software # distributed",
"branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\":",
"self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"})",
"Now merge p2 so p1 is not up to date await self.add_label(p2[\"number\"], \"merge\")",
"See the # License for the specific language governing permissions and limitations #",
"assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules = {",
"{ \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, { \"name\": \"merge\", \"conditions\":",
"rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}},",
"\"License\"); you may # not use this file except in compliance with the",
"commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert",
"== config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [",
"from mergify_engine import context from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self):",
"Apache License, Version 2.0 (the \"License\"); you may # not use this file",
"def test_update_action(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\":",
"the License. You may obtain # a copy of the License at #",
"self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN",
"\"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1,",
"self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async",
"law or agreed to in writing, software # distributed under the License is",
"© 2018–2021 Mergify SAS # # Licensed under the Apache License, Version 2.0",
"may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"the Apache License, Version 2.0 (the \"License\"); you may # not use this",
"== 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 =",
"self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\":",
"[]) checks = await ctxt.pull_engine_check_runs for check in checks: assert check[\"conclusion\"] == \"success\",",
"ctxt = await context.Context.create(self.repository_ctxt, p1, []) checks = await ctxt.pull_engine_check_runs for check in",
"await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1, []) checks = await ctxt.pull_engine_check_runs for",
"express or implied. See the # License for the specific language governing permissions",
"config from mergify_engine import context from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async def",
"\"delete_head_branch\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2,",
"= await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1",
"from mergify_engine import config from mergify_engine import context from mergify_engine.tests.functional import base class",
"p2, _ = await self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 1",
"\"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"],",
"an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either",
"# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"CONDITIONS OF ANY KIND, either express or implied. See the # License for",
"not use this file except in compliance with the License. You may obtain",
"async def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"],",
"1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await",
"mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules = { \"pull_request_rules\": [",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"with the License. You may obtain # a copy of the License at",
"_ = await self.create_pr() p2, _ = await self.create_pr() commits = await self.get_commits(p2[\"number\"])",
"for the specific language governing permissions and limitations # under the License. import",
"the specific language governing permissions and limitations # under the License. import yaml",
"{ \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, }, ] } await",
"await self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await",
"Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not",
"await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"])",
"] } await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2, _ = await",
"assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def",
"License for the specific language governing permissions and limitations # under the License.",
"limitations # under the License. import yaml from mergify_engine import config from mergify_engine",
"test_update_action(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\":",
"assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 = await self.get_pull(p1[\"number\"])",
"1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await",
"{\"merge\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2,",
"2.0 (the \"License\"); you may # not use this file except in compliance",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"}, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, }, ] }",
"context from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules = {",
"await self.create_pr() p2, _ = await self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert len(commits)",
"await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\":",
"= await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits =",
"# under the License. import yaml from mergify_engine import config from mergify_engine import",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert",
"await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\",",
"use this file except in compliance with the License. You may obtain #",
"def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\":",
"mergify_engine import context from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT",
"self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert",
"== config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2 so p1 is not",
"await context.Context.create(self.repository_ctxt, p1, []) checks = await ctxt.pull_engine_check_runs for check in checks: assert",
"config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2 so p1 is not up",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #",
"# Copyright © 2018–2021 Mergify SAS # # Licensed under the Apache License,",
"compliance with the License. You may obtain # a copy of the License",
"\"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, }, ] }",
"License, Version 2.0 (the \"License\"); you may # not use this file except",
"\"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _",
"context.Context.create(self.repository_ctxt, p1, []) checks = await ctxt.pull_engine_check_runs for check in checks: assert check[\"conclusion\"]",
"BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF",
"-*- encoding: utf-8 -*- # # Copyright © 2018–2021 Mergify SAS # #",
"= await self.create_pr() p2, _ = await self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert",
"assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now",
"commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2 so p1 is not up to date",
"test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\":",
"self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1, []) checks = await ctxt.pull_engine_check_runs for check",
"\"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, }, ] } await self.setup_repo(yaml.dump(rules))",
"checks = await ctxt.pull_engine_check_runs for check in checks: assert check[\"conclusion\"] == \"success\", check",
"utf-8 -*- # # Copyright © 2018–2021 Mergify SAS # # Licensed under",
"await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\",",
"2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2 so",
"IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"implied. See the # License for the specific language governing permissions and limitations",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"OF ANY KIND, either express or implied. See the # License for the",
"\"actions\": {\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {},",
"await self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] ==",
"{}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2, _",
"date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1, []) checks",
"governing permissions and limitations # under the License. import yaml from mergify_engine import",
"assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2 so p1 is not up to",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"= await context.Context.create(self.repository_ctxt, p1, []) checks = await ctxt.pull_engine_check_runs for check in checks:",
"self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await",
"from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules = { \"pull_request_rules\":",
"\"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ =",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the",
"async def test_update_action(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"],",
"assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2 so p1",
"self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 2",
"you may # not use this file except in compliance with the License.",
"len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self):",
"commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\":",
"\"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, }, ] } await",
"agreed to in writing, software # distributed under the License is distributed on",
"the License. import yaml from mergify_engine import config from mergify_engine import context from",
"{}, \"delete_head_branch\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr()",
"self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1, []) checks = await",
"is not up to date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt = await",
"{\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await",
"self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\":",
"(the \"License\"); you may # not use this file except in compliance with",
"[f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1,",
"{\"merge\": {}, \"delete_head_branch\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await",
"may # not use this file except in compliance with the License. You",
"KIND, either express or implied. See the # License for the specific language",
"\"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, { \"name\":",
"commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine()",
"== 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"]",
"self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") #",
"# -*- encoding: utf-8 -*- # # Copyright © 2018–2021 Mergify SAS #",
"either express or implied. See the # License for the specific language governing",
"\"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine()",
"# # Unless required by applicable law or agreed to in writing, software",
"file except in compliance with the License. You may obtain # a copy",
"self.create_pr() p2, _ = await self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert len(commits) ==",
"this file except in compliance with the License. You may obtain # a",
"-*- # # Copyright © 2018–2021 Mergify SAS # # Licensed under the",
"# Unless required by applicable law or agreed to in writing, software #",
"import yaml from mergify_engine import config from mergify_engine import context from mergify_engine.tests.functional import",
"assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert",
"by applicable law or agreed to in writing, software # distributed under the",
"\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"yaml from mergify_engine import config from mergify_engine import context from mergify_engine.tests.functional import base",
"self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine()",
"under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"or implied. See the # License for the specific language governing permissions and",
"2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules =",
"# Now merge p2 so p1 is not up to date await self.add_label(p2[\"number\"],",
"under the License. import yaml from mergify_engine import config from mergify_engine import context",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\",",
"# # Copyright © 2018–2021 Mergify SAS # # Licensed under the Apache",
"{\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, },",
"== 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules",
"branch\") # Now merge p2 so p1 is not up to date await",
"License. import yaml from mergify_engine import config from mergify_engine import context from mergify_engine.tests.functional",
"len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1",
"assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [ { \"name\":",
"License. You may obtain # a copy of the License at # #",
"mergify_engine import config from mergify_engine import context from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase):",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR",
"await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 =",
"Copyright © 2018–2021 Mergify SAS # # Licensed under the Apache License, Version",
"\"merge\") await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1, []) checks = await ctxt.pull_engine_check_runs",
"distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY",
"Mergify SAS # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"encoding: utf-8 -*- # # Copyright © 2018–2021 Mergify SAS # # Licensed",
"[f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\":",
"= await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await",
"{}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}},",
"\"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\":",
"merge p2 so p1 is not up to date await self.add_label(p2[\"number\"], \"merge\") await",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may",
"on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,",
"{}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, }, ]",
"_ = await self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await",
"self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await self.get_commits(p2[\"number\"])",
"ANY KIND, either express or implied. See the # License for the specific",
"the # License for the specific language governing permissions and limitations # under",
"except in compliance with the License. You may obtain # a copy of",
"{\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"{ \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, {",
"}, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2, _ =",
"f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"]",
"up to date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1,",
"specific language governing permissions and limitations # under the License. import yaml from",
"\"label=merge\"], \"actions\": {\"merge\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await",
"to in writing, software # distributed under the License is distributed on an",
"p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await self.get_commits(p2[\"number\"]) assert len(commits)",
"You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"\"merge\") await self.run_engine() p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"})",
"and limitations # under the License. import yaml from mergify_engine import config from",
"await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1, []) checks =",
"} await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2, _ = await self.create_pr()",
"\"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _",
"required by applicable law or agreed to in writing, software # distributed under",
"await self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\")",
"p1, _ = await self.create_pr() p2, _ = await self.create_pr() commits = await",
"\"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, }, ] } await self.setup_repo(yaml.dump(rules))",
"[f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ =",
"applicable law or agreed to in writing, software # distributed under the License",
"p1 is not up to date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt =",
"distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #",
"import context from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules =",
"OR CONDITIONS OF ANY KIND, either express or implied. See the # License",
"TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\":",
"assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"})",
"obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\",",
"= await self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"],",
"{\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\":",
"self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\") await self.run_engine() p1 = await",
"len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge",
"== 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2",
"to date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt, p1, [])",
"language governing permissions and limitations # under the License. import yaml from mergify_engine",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #",
"in compliance with the License. You may obtain # a copy of the",
"# not use this file except in compliance with the License. You may",
"import config from mergify_engine import context from mergify_engine.tests.functional import base class TestUpdateAction(base.FunctionalTestBase): async",
"self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2, _ = await self.create_pr() commits =",
"\"actions\": {\"merge\": {}}, }, ] } await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr()",
"or agreed to in writing, software # distributed under the License is distributed",
"await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits = await",
"p2 so p1 is not up to date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine()",
"not up to date await self.add_label(p2[\"number\"], \"merge\") await self.run_engine() ctxt = await context.Context.create(self.repository_ctxt,",
"import base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules = { \"pull_request_rules\": [ {",
"# License for the specific language governing permissions and limitations # under the",
"await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await",
"base class TestUpdateAction(base.FunctionalTestBase): async def test_update_action(self): rules = { \"pull_request_rules\": [ { \"name\":",
"[ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, { \"name\": \"merge\",",
"commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [ { \"name\": \"update\",",
"await self.create_pr() commits = await self.get_commits(p2[\"number\"]) assert len(commits) == 1 await self.add_label(p1[\"number\"], \"merge\")",
"under the Apache License, Version 2.0 (the \"License\"); you may # not use",
"\"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\",",
"commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") # Now merge p2 so p1 is",
"WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See",
"\"merge\") await self.run_engine() await self.wait_for(\"pull_request\", {\"action\": \"closed\"}) p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"]",
"}, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}, \"delete_head_branch\": {}}, },",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"= { \"pull_request_rules\": [ { \"name\": \"update\", \"conditions\": [f\"base={self.main_branch_name}\"], \"actions\": {\"update\": {}}, },",
"config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge branch\") async def test_update_action_on_closed_pr_deleted_branch(self): rules = { \"pull_request_rules\": [ {",
"in writing, software # distributed under the License is distributed on an \"AS",
"p1 = await self.get_pull(p1[\"number\"]) assert p1[\"merged\"] await self.wait_for(\"push\", {\"ref\": f\"refs/heads/{self.main_branch_name}\"}) await self.run_engine() commits",
"2018–2021 Mergify SAS # # Licensed under the Apache License, Version 2.0 (the",
"await self.setup_repo(yaml.dump(rules)) p1, _ = await self.create_pr() p2, _ = await self.create_pr() commits",
"Version 2.0 (the \"License\"); you may # not use this file except in",
"= await self.get_commits(p2[\"number\"]) assert len(commits) == 2 assert commits[-1][\"commit\"][\"author\"][\"name\"] == config.BOT_USER_LOGIN assert commits[-1][\"commit\"][\"message\"].startswith(\"Merge",
"p1, []) checks = await ctxt.pull_engine_check_runs for check in checks: assert check[\"conclusion\"] ==",
"SAS # # Licensed under the Apache License, Version 2.0 (the \"License\"); you",
"permissions and limitations # under the License. import yaml from mergify_engine import config",
"\"actions\": {\"update\": {}}, }, { \"name\": \"merge\", \"conditions\": [f\"base={self.main_branch_name}\", \"label=merge\"], \"actions\": {\"merge\": {}},"
] |
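# Aside (illustrative, not part of the test file above): the `rules` dict
# used by these tests is simply the in-memory form of a .mergify.yml.
# Dumping it with yaml.dump shows the configuration that setup_repo()
# installs; "main" below is a hypothetical stand-in for
# self.main_branch_name.
import yaml

example_rules = {
    "pull_request_rules": [
        {
            "name": "update",
            "conditions": ["base=main"],
            "actions": {"update": {}},
        },
        {
            "name": "merge",
            "conditions": ["base=main", "label=merge"],
            "actions": {"merge": {}},
        },
    ]
}
print(yaml.dump(example_rules))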
# jiadaizhao/LeetCode: 0701-0800/0725-Split Linked List in Parts/0725-Split Linked List in Parts.py
from typing import List


# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def splitListToParts(self, root: ListNode, k: int) -> List[ListNode]:
        # Count the length of the list.
        le = 0
        head = root
        while head:
            le += 1
            head = head.next
        # Each part gets `count` nodes; the first `remain` parts get one extra.
        count, remain = divmod(le, k)
        result = [None] * k
        head = root
        prev = None
        for i in range(k):
            if head is None:
                break
            else:
                result[i] = head
                for j in range(count):
                    prev = head
                    head = head.next
                if remain > 0:
                    prev = head
                    head = head.next
                    remain -= 1
                # Detach this part from the rest of the list.
                prev.next = None
        return result
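# Quick sanity check for the solution above (assumes the ListNode and
# Solution definitions from this file; the helpers below are illustrative).
def build_list(values):
    # Build a singly-linked list from a Python list and return its head.
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head


def to_pylist(head):
    # Flatten a linked list back into a Python list.
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out


parts = Solution().splitListToParts(build_list([1, 2, 3]), 5)
# With 3 nodes split into 5 parts, the first three parts hold one node
# each and the last two stay empty.
assert [to_pylist(p) for p in parts] == [[1], [2], [3], [], []]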
# Source repo: kristerw/spirv-tools
"""Removes unused instructions.

The definition of "unused instruction" is an instruction having a return ID
that is not used by any non-debug and non-decoration instruction, and does
not have side effects."""
from spirv_tools import ir


def remove_debug_if_dead(inst):
    """Remove debug instruction if it is not used."""
    assert inst.op_name in ir.DEBUG_INSTRUCTIONS
    if inst.op_name == 'OpName':
        if inst.operands[0].inst is None:
            inst.destroy()


def remove_decoration_if_dead(inst):
    """Remove decoration instruction if it is not used."""
    assert inst.op_name in ir.DECORATION_INSTRUCTIONS
    if inst.op_name != 'OpDecorationGroup':
        if inst.operands[0].inst is None:
            inst.destroy()


def process_function(module, function):
    """Run the pass on one function."""
    # We need to re-run the pass if elimination of a phi-node makes
    # instructions dead in an already processed basic block.
    rerun = True
    while rerun:
        rerun = False
        processed_bbs = set()
        for inst in function.instructions_reversed():
            if inst.op_name == 'OpLabel':
                processed_bbs.add(inst.basic_block)
            if not inst.has_side_effects() and not inst.uses():
                if inst.op_name == 'OpPhi':
                    processed_bbs.add(inst.basic_block)
                    operands = inst.operands[:]
                    inst.destroy()
                    for operand in operands:
                        if (operand.inst.op_name != 'OpLabel' and
                                operand.inst.basic_block in processed_bbs and
                                not operand.inst.uses()):
                            rerun = True
                            break
                else:
                    inst.destroy()


def run(module):
    """Remove all unused instructions."""
    # Garbage collect old unused debug and decoration instructions.
    # This is done before the real pass because:
    # * They need some special handling, as they do not have inst.result_id
    # * They come in the wrong order with regard to constants, so we would
    #   need extra code in the real pass to ensure constants used in OpLine
    #   are removed.
    # Note: the debug and decoration instructions that are live at the start
    # of this pass are handled by the real pass when the instruction they
    # point to is removed.
    for inst in module.global_instructions.name_insts:
        remove_debug_if_dead(inst)
    for inst in module.global_instructions.op_string_insts:
        remove_debug_if_dead(inst)
    for inst in reversed(module.global_instructions.decoration_insts):
        remove_decoration_if_dead(inst)

    # Remove unused instructions in functions.
    for function in module.functions:
        process_function(module, function)

    # Remove unused global instructions.
    for inst in module.global_instructions.instructions_reversed():
        if not inst.has_side_effects() and not inst.uses():
            inst.destroy()
"an instruction having a return ID that is not used by any non-debug",
"point to is removed. for inst in module.global_instructions.name_insts: remove_debug_if_dead(inst) for inst in module.global_instructions.op_string_insts:",
"unused global instructions. for inst in module.global_instructions.instructions_reversed(): if not inst.has_side_effects() and not inst.uses():",
"so we would # need extra code in the real pass to ensure",
"when the instruction they # point to is removed. for inst in module.global_instructions.name_insts:",
"and does not have side effects.\"\"\" from spirv_tools import ir def remove_debug_if_dead(inst): \"\"\"Remove",
"def run(module): \"\"\"Remove all unused instructions.\"\"\" # Garbage collect old unused debug and",
"= set() for inst in function.instructions_reversed(): if inst.op_name == 'OpLabel': processed_bbs.add(inst.basic_block) if not",
"# are removed. # Note: the debug and decoration instructions that are live",
"and non-decoration instruction, and does not have side effects.\"\"\" from spirv_tools import ir",
"# * They come in the wrong order with regard to constants, so",
"'OpName': if inst.operands[0].inst is None: inst.destroy() def remove_decoration_if_dead(inst): \"\"\"Remove decoration instruction if it",
"by the real pass when the instruction they # point to is removed.",
"side effects.\"\"\" from spirv_tools import ir def remove_debug_if_dead(inst): \"\"\"Remove debug instruction if it",
"if elimination of a phi-node makes # instructions dead in an already processed",
"elimination of a phi-node makes # instructions dead in an already processed basic",
"this pass is handled by the real pass when the instruction they #",
"used.\"\"\" assert inst.op_name in ir.DEBUG_INSTRUCTIONS if inst.op_name == 'OpName': if inst.operands[0].inst is None:",
"We need to re-run the pass if elimination of a phi-node makes #",
"instructions.\"\"\" # Garbage collect old unused debug and decoration instructions. # This is",
"instruction they # point to is removed. for inst in module.global_instructions.name_insts: remove_debug_if_dead(inst) for",
"block. rerun = True while rerun: rerun = False processed_bbs = set() for",
"they do not have inst.result_id # * They come in the wrong order",
"\"\"\"Remove all unused instructions.\"\"\" # Garbage collect old unused debug and decoration instructions.",
"unused instructions in functions. for function in module.functions: process_function(module, function) # Remove unused"
"token looks like this. result = None # First, the code looks up",
": 'Bearer ' + result['access_token'], 'Accept' : 'application/json', 'Content-Type' : 'application/json' } #",
"have a site try: our_site = None for a_site in siteq['value']: if a_site['name']",
"import requests import json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [",
"acquire a token looks like this. result = None # First, the code",
"'another test field' } } payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id'])",
"json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except",
"for the current app, not for a user, # use None for the",
"headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item",
"may not have a site try: our_site = None for a_site in siteq['value']:",
"field' } } payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item =",
"list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root,",
"a protected API with the access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = {",
": 'application/json' } # Look for our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName)",
"the code looks up a token from the cache. # Because we're looking",
"access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' : 'Bearer ' +",
"if a_site['name'] == siteName: our_site = a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List')",
"up a token from the cache. # Because we're looking for a token",
"'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' : 'Bearer ' + result['access_token'], 'Accept' : 'application/json',",
"Call a protected API with the access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers =",
"# use None for the account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if not",
"we're looking for a token for the current app, not for a user,",
"= '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'],",
"\"access_token\" in result: # Call a protected API with the access token. endpoint_root",
"None # First, the code looks up a token from the cache. #",
"= '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = { 'fields':",
"result = app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No suitable token exists in cache.",
"in siteq['value']: if a_site['name'] == siteName: our_site = a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root,",
"' + result['access_token'], 'Accept' : 'application/json', 'Content-Type' : 'application/json' } # Look for",
"Because we're looking for a token for the current app, not for a",
"stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep =",
"the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep,",
"a token for the current app, not for a user, # use None",
"requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\"))",
"AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: # Call a protected API",
"First, the code looks up a token from the cache. # Because we're",
"a token from the cache. # Because we're looking for a token for",
"'Authorization' : 'Bearer ' + result['access_token'], 'Accept' : 'application/json', 'Content-Type' : 'application/json' }",
"one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: # Call a",
"'application/json', 'Content-Type' : 'application/json' } # Look for our site siteName='MFPersonal' endpoint =",
"= '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'],",
"http_headers = { 'Authorization' : 'Bearer ' + result['access_token'], 'Accept' : 'application/json', 'Content-Type'",
"for a user, # use None for the account parameter. result = app.acquire_token_silent(config[\"scope\"],",
"requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = { 'fields': { 'Title' : 'Another item', 'testfield'",
"long-lived app instance that maintains a token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"],",
"= { 'fields': { 'Title' : 'Another item', 'testfield' : 'another test field'",
"# The pattern to acquire a token looks like this. result = None",
"for a token for the current app, not for a user, # use",
"our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = { 'fields': { 'Title'",
"not have a site try: our_site = None for a_site in siteq['value']: if",
"= requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers,",
"= requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers,",
"test field' } } payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item",
"our_site = None for a_site in siteq['value']: if a_site['name'] == siteName: our_site =",
"None for a_site in siteq['value']: if a_site['name'] == siteName: our_site = a_site break",
"stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item =",
"Create a preferably long-lived app instance that maintains a token cache. app =",
"logging import requests import json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\":",
"msal import logging import requests import json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\":",
"app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No suitable token exists in cache. Let's get",
"= app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: # Call a protected API with the",
"the cache. # Because we're looking for a token for the current app,",
"that maintains a token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) #",
"'{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id'])",
"= { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\":",
"the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = { 'fields': { 'Title' :",
"this. result = None # First, the code looks up a token from",
"new_item = { 'fields': { 'Title' : 'Another item', 'testfield' : 'another test",
"# Because we're looking for a token for the current app, not for",
"else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You might need this when reporting a bug.",
"a site try: our_site = None for a_site in siteq['value']: if a_site['name'] ==",
"import json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ],",
": 'Another item', 'testfield' : 'another test field' } } payload = json.dumps(new_item)",
"maintains a token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The",
"app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern to acquire a",
"= requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\"))",
"token for the current app, not for a user, # use None for",
"headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep",
"print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You might need this when reporting a",
"+ result['access_token'], 'Accept' : 'application/json', 'Content-Type' : 'application/json' } # Look for our",
"Error as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You might need",
"stream=False).json() except Error as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You",
"headers=http_headers, data=payload, stream=False).json() except Error as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\"))",
"data=payload, stream=False).json() except Error as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) #",
"client_credential=config[\"secret\"] ) # The pattern to acquire a token looks like this. result",
"\"https://graph.microsoft.com/v1.0/users\" } # Create a preferably long-lived app instance that maintains a token",
"requests import json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\"",
"preferably long-lived app instance that maintains a token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"],",
"result: logging.info(\"No suitable token exists in cache. Let's get a new one from",
"= None for a_site in siteq['value']: if a_site['name'] == siteName: our_site = a_site",
"'testfield' : 'another test field' } } payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root,",
"\"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a",
"exists in cache. Let's get a new one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"])",
"= { 'Authorization' : 'Bearer ' + result['access_token'], 'Accept' : 'application/json', 'Content-Type' :",
"siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json() # We may not have a site",
"endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' : 'Bearer ' + result['access_token'], 'Accept'",
"{ \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\"",
"like this. result = None # First, the code looks up a token",
"# Look for our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint,",
"== siteName: our_site = a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list =",
"= a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json()",
"'fields': { 'Title' : 'Another item', 'testfield' : 'another test field' } }",
"config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\",",
"= 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' : 'Bearer ' + result['access_token'], 'Accept' :",
"our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item",
"[ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a preferably long-lived",
"= requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = { 'fields': { 'Title' : 'Another item',",
"the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep,",
"a token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern",
"get a new one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result:",
"requests.get(endpoint, headers=http_headers, stream=False).json() # We may not have a site try: our_site =",
"break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep =",
"{ 'Authorization' : 'Bearer ' + result['access_token'], 'Accept' : 'application/json', 'Content-Type' : 'application/json'",
"import logging import requests import json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\",",
"Let's get a new one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in",
"app, not for a user, # use None for the account parameter. result",
"token exists in cache. Let's get a new one from AAD.\") result =",
"from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: # Call a protected",
"'Title' : 'Another item', 'testfield' : 'another test field' } } payload =",
"suitable token exists in cache. Let's get a new one from AAD.\") result",
"pattern to acquire a token looks like this. result = None # First,",
"# First, the code looks up a token from the cache. # Because",
"in cache. Let's get a new one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if",
"a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep",
"} } payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep,",
"e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You might need this when",
"the account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No suitable token",
"Look for our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers,",
"for our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json()",
"\"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create",
"our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json() #",
"the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item =",
"'Another item', 'testfield' : 'another test field' } } payload = json.dumps(new_item) new_item_ep",
"instance that maintains a token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] )",
"an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = { 'fields': { 'Title' : 'Another",
"for the account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No suitable",
"'Bearer ' + result['access_token'], 'Accept' : 'application/json', 'Content-Type' : 'application/json' } # Look",
"if \"access_token\" in result: # Call a protected API with the access token.",
"} # Create a preferably long-lived app instance that maintains a token cache.",
"a_site in siteq['value']: if a_site['name'] == siteName: our_site = a_site break list_ep =",
"listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root,",
"payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload,",
"'{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json() # We may not have a",
"a token looks like this. result = None # First, the code looks",
"new one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: # Call",
"stream=False).json() # We may not have a site try: our_site = None for",
"cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern to acquire",
"app instance that maintains a token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"]",
"site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json() # We",
"new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error",
"token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' : 'Bearer ' + result['access_token'],",
"siteName: our_site = a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep,",
"looking for a token for the current app, not for a user, #",
"our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items",
"= msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern to acquire a token",
"The pattern to acquire a token looks like this. result = None #",
"with the access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' : 'Bearer",
"'Content-Type' : 'application/json' } # Look for our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root,",
"endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json() # We may not",
"= json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json()",
"} # Look for our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq =",
"looks like this. result = None # First, the code looks up a",
"'application/json' } # Look for our site siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq",
"We may not have a site try: our_site = None for a_site in",
"a preferably long-lived app instance that maintains a token cache. app = msal.ConfidentialClientApplication(",
"'{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id'])",
"\"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } #",
"token from the cache. # Because we're looking for a token for the",
"siteq = requests.get(endpoint, headers=http_headers, stream=False).json() # We may not have a site try:",
"not result: logging.info(\"No suitable token exists in cache. Let's get a new one",
"app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: # Call a protected API with the access",
"config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern to acquire a token looks like",
"result = None # First, the code looks up a token from the",
"user, # use None for the account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if",
"our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error as e: print(str(e))",
"siteName='MFPersonal' endpoint = '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json() # We may",
"# We may not have a site try: our_site = None for a_site",
"account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No suitable token exists",
"result: # Call a protected API with the access token. endpoint_root = 'https://graph.microsoft.com/v1.0'",
"parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No suitable token exists in",
"in result: # Call a protected API with the access token. endpoint_root =",
"= '{}/sites?search={}'.format(endpoint_root, siteName) siteq = requests.get(endpoint, headers=http_headers, stream=False).json() # We may not have",
") # The pattern to acquire a token looks like this. result =",
"'{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = { 'fields': {",
"item', 'testfield' : 'another test field' } } payload = json.dumps(new_item) new_item_ep =",
"\"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a preferably long-lived app instance that",
"\"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a preferably long-lived app instance that maintains a",
"an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json() new_item = {",
": 'application/json', 'Content-Type' : 'application/json' } # Look for our site siteName='MFPersonal' endpoint",
"} payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers,",
"], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a preferably long-lived app instance",
"# Create a preferably long-lived app instance that maintains a token cache. app",
"import msal import logging import requests import json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\",",
"token cache. app = msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern to",
"make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error as e: print(str(e)) print(result[\"token_type\"]) else:",
"# Call a protected API with the access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers",
"the current app, not for a user, # use None for the account",
"cache. # Because we're looking for a token for the current app, not",
"= app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No suitable token exists in cache. Let's",
"except Error as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You might",
"= None # First, the code looks up a token from the cache.",
"a_site['name'] == siteName: our_site = a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list",
"msal.ConfidentialClientApplication( config[\"client_id\"], authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern to acquire a token looks",
"headers=http_headers, stream=False).json() # We may not have a site try: our_site = None",
"\"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a preferably long-lived app",
"None for the account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if not result: logging.info(\"No",
"stream=False).json() new_item = { 'fields': { 'Title' : 'Another item', 'testfield' : 'another",
"current app, not for a user, # use None for the account parameter.",
"the access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' : 'Bearer '",
"for a_site in siteq['value']: if a_site['name'] == siteName: our_site = a_site break list_ep",
"siteq['value']: if a_site['name'] == siteName: our_site = a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'],",
"a user, # use None for the account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None)",
"if not result: logging.info(\"No suitable token exists in cache. Let's get a new",
"= requests.get(endpoint, headers=http_headers, stream=False).json() # We may not have a site try: our_site",
"result['access_token'], 'Accept' : 'application/json', 'Content-Type' : 'application/json' } # Look for our site",
"not for a user, # use None for the account parameter. result =",
"headers=http_headers, stream=False).json() new_item = { 'fields': { 'Title' : 'Another item', 'testfield' :",
"logging.info(\"No suitable token exists in cache. Let's get a new one from AAD.\")",
"requests.get(listitem_ep, headers=http_headers, stream=False).json() an_item_ep = '{}/sites/{}/lists/{}/items/1'.format(endpoint_root, our_site['id'], the_list['id']) an_item = requests.get(an_item_ep, headers=http_headers, stream=False).json()",
"authority=config[\"authority\"], client_credential=config[\"secret\"] ) # The pattern to acquire a token looks like this.",
"json config = { \"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\":",
"'{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error as e:",
"a new one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: #",
"= '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error as",
"API with the access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization' :",
"'Accept' : 'application/json', 'Content-Type' : 'application/json' } # Look for our site siteName='MFPersonal'",
"{ 'Title' : 'Another item', 'testfield' : 'another test field' } } payload",
"result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\" in result: # Call a protected API with",
"requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items = requests.get(listitem_ep, headers=http_headers, stream=False).json()",
"cache. Let's get a new one from AAD.\") result = app.acquire_token_for_client(scopes=config[\"scope\"]) if \"access_token\"",
"to acquire a token looks like this. result = None # First, the",
"use None for the account parameter. result = app.acquire_token_silent(config[\"scope\"], account=None) if not result:",
"our_site = a_site break list_ep = '{}/sites/{}/lists/{}'.format(endpoint_root, our_site['id'], 'Test%20List') the_list = requests.get(list_ep, headers=http_headers,",
"the_list['id']) make_new_item = requests.post(new_item_ep, headers=http_headers, data=payload, stream=False).json() except Error as e: print(str(e)) print(result[\"token_type\"])",
"as e: print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You might need this",
"account=None) if not result: logging.info(\"No suitable token exists in cache. Let's get a",
"print(str(e)) print(result[\"token_type\"]) else: print(result.get(\"error\")) print(result.get(\"error_description\")) print(result.get(\"correlation_id\")) # You might need this when reporting",
"\"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a preferably long-lived app instance that maintains",
"try: our_site = None for a_site in siteq['value']: if a_site['name'] == siteName: our_site",
": 'another test field' } } payload = json.dumps(new_item) new_item_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'],",
"'Test%20List') the_list = requests.get(list_ep, headers=http_headers, stream=False).json() listitem_ep = '{}/sites/{}/lists/{}/items'.format(endpoint_root, our_site['id'], the_list['id']) the_items =",
"\"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" } # Create a preferably",
"looks up a token from the cache. # Because we're looking for a",
"site try: our_site = None for a_site in siteq['value']: if a_site['name'] == siteName:",
"code looks up a token from the cache. # Because we're looking for",
"protected API with the access token. endpoint_root = 'https://graph.microsoft.com/v1.0' http_headers = { 'Authorization'",
"\"authority\": \"https://login.microsoftonline.com/9f4083b0-0ac6-4dee-b0bb-b78b1436f9f3\", \"client_id\": \"d584a43a-c4c1-4fbe-9c1c-3cae87420e6e\", \"scope\": [ \"https://graph.microsoft.com/.default\" ], \"secret\": \"<KEY>\", \"endpoint\": \"https://graph.microsoft.com/v1.0/users\" }",
"from the cache. # Because we're looking for a token for the current",
"{ 'fields': { 'Title' : 'Another item', 'testfield' : 'another test field' }"
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x,",
"EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES",
"the distribution. # # 3. Neither the name of the Raysect Project nor",
"v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"v2y, v2z = 1, 0, 0 v3x, v3y, v3z = 0, 1, 0",
"0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"without # modification, are permitted provided that the following conditions are met: #",
"algorithm.\"\"\" # defining triangle vertices v1x, v1y, v1z = 0, 0, 0 v2x,",
"v4z)) # check line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"All rights reserved. # # Redistribution and use in source and binary forms,",
"software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY",
"# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE",
"retain the above copyright notice, # this list of conditions and the following",
"0 v3x, v3y, v3z = 0, 1, 0 v4x, v4y, v4z = 0,",
"PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS",
"# this software without specific prior written permission. # # THIS SOFTWARE IS",
"# documentation and/or other materials provided with the distribution. # # 3. Neither",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"documentation and/or other materials provided with the distribution. # # 3. Neither the",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"with the distribution. # # 3. Neither the name of the Raysect Project",
"v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5))",
"v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #",
"v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,",
"DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335)) if __name__",
"# check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"provided that the following conditions are met: # # 1. Redistributions of source",
"v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #",
"are met: # # 1. Redistributions of source code must retain the above",
"class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\" # defining triangle vertices",
"conditions and the following disclaimer in the # documentation and/or other materials provided",
"\"\"\"Tests the inside tetrahedra algorithm.\"\"\" # defining triangle vertices v1x, v1y, v1z =",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z))",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5))",
"and/or other materials provided with the distribution. # # 3. Neither the name",
"v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5))",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5))",
"BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF",
"v4x, v4y, v4z, 0.25, 0.25, 0.25)) # check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y,",
"v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"permitted provided that the following conditions are met: # # 1. Redistributions of",
"SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #",
"LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR",
"THE # POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit tests for the Vector3D object.",
"v4x, v4y, v4z, 0.5, 0.5, 0)) # check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y,",
"-0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"notice, this list of conditions and the following disclaimer in the # documentation",
"provided with the distribution. # # 3. Neither the name of the Raysect",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"Neither the name of the Raysect Project nor the names of its #",
"this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) # check an exterior",
"v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) # check",
"0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"\"\"\" Unit tests for the Vector3D object. \"\"\" import unittest import numpy as",
"CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR",
"BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN",
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS",
"numpy as np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import",
"Dr <NAME>, Raysect Project # All rights reserved. # # Redistribution and use",
"IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING",
"1, 0 v4x, v4y, v4z = 0, 0, 1 # test vertices are",
"inside tetrahedra algorithm.\"\"\" # defining triangle vertices v1x, v1y, v1z = 0, 0,",
"TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR",
"EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit tests",
"with or without # modification, are permitted provided that the following conditions are",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0))",
"0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x,",
"v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS",
"following conditions are met: # # 1. Redistributions of source code must retain",
"# # 1. Redistributions of source code must retain the above copyright notice,",
"disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright",
"v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5,",
"# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN",
"endorse or promote products derived from # this software without specific prior written",
"v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"0.5, 0)) # check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"# # 3. Neither the name of the Raysect Project nor the names",
"v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"modification, are permitted provided that the following conditions are met: # # 1.",
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO",
"OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF",
"v1x, v1y, v1z = 0, 0, 0 v2x, v2y, v2z = 1, 0,",
"NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"rights reserved. # # Redistribution and use in source and binary forms, with",
"v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0,",
"OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY",
"point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x,",
"0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"Redistributions in binary form must reproduce the above copyright # notice, this list",
"v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"following disclaimer. # # 2. Redistributions in binary form must reproduce the above",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) # check an interior",
"0.25)) # check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x,",
"self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0,",
"v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS",
"ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT",
"v4z = 0, 0, 1 # test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,",
"of source code must retain the above copyright notice, # this list of",
"0.25, 0.25, 0.25)) # check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y,",
"TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"Redistributions of source code must retain the above copyright notice, # this list",
"DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY",
"IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY",
"or without # modification, are permitted provided that the following conditions are met:",
"# check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x,",
"0, 0 v3x, v3y, v3z = 0, 1, 0 v4x, v4y, v4z =",
"notice, # this list of conditions and the following disclaimer. # # 2.",
"INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,",
"self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25,",
"NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF",
"met: # # 1. Redistributions of source code must retain the above copyright",
"# test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"# notice, this list of conditions and the following disclaimer in the #",
"v3y, v3z = 0, 1, 0 v4x, v4y, v4z = 0, 0, 1",
"HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,",
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN",
"inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the",
"WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #",
"def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\" # defining triangle vertices v1x, v1y,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y,",
"1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) # check line",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01))",
"0, 0, 1 # test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"0.5, 0.5, 0)) # check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0))",
"np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as",
"LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y,",
"import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def",
"distribution. # # 3. Neither the name of the Raysect Project nor the",
"for the Vector3D object. \"\"\" import unittest import numpy as np # from",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) #",
"AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR",
"ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #",
"PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;",
"0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25,",
"v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"vertices v1x, v1y, v1z = 0, 0, 0 v2x, v2y, v2z = 1,",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x,",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y,",
"v4y, v4z, 0.25, 0.25, 0.25)) # check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z,",
"v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) # check an interior point",
"v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"Raysect Project nor the names of its # contributors may be used to",
"v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"1 # test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"conditions and the following disclaimer. # # 2. Redistributions in binary form must",
"and use in source and binary forms, with or without # modification, are",
"v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"import numpy as np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0,",
"v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"conditions are met: # # 1. Redistributions of source code must retain the",
"this list of conditions and the following disclaimer in the # documentation and/or",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z,",
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT",
"v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"-0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"OF SUCH DAMAGE. \"\"\" Unit tests for the Vector3D object. \"\"\" import unittest",
"Copyright (c) 2014-2021, Dr <NAME>, Raysect Project # All rights reserved. # #",
"HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT",
"of its # contributors may be used to endorse or promote products derived",
"EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT,",
"copyright notice, # this list of conditions and the following disclaimer. # #",
"the # documentation and/or other materials provided with the distribution. # # 3.",
"its # contributors may be used to endorse or promote products derived from",
"POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit tests for the Vector3D object. \"\"\" import",
"disclaimer in the # documentation and/or other materials provided with the distribution. #",
"v4y, v4z, v4x, v4y, v4z)) # check line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y,",
"-0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT",
"v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) # check line segments",
"DAMAGE. \"\"\" Unit tests for the Vector3D object. \"\"\" import unittest import numpy",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT",
"self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5,",
"contributors may be used to endorse or promote products derived from # this",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT,",
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.",
"test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) # check an exterior point self.assertFalse(inside_tetrahedra(v1x,",
"Redistribution and use in source and binary forms, with or without # modification,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) # check",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0))",
"source and binary forms, with or without # modification, are permitted provided that",
"BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR",
"SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y,",
"above copyright # notice, this list of conditions and the following disclaimer in",
"FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT",
"NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335)) if",
"v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR",
"check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"binary form must reproduce the above copyright # notice, this list of conditions",
"(INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE",
"exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"form must reproduce the above copyright # notice, this list of conditions and",
"PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,",
"(c) 2014-2021, Dr <NAME>, Raysect Project # All rights reserved. # # Redistribution",
"from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside",
"# # 2. Redistributions in binary form must reproduce the above copyright #",
"derived from # this software without specific prior written permission. # # THIS",
"Project nor the names of its # contributors may be used to endorse",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y,",
"v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"# contributors may be used to endorse or promote products derived from #",
"v4x, v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"the above copyright # notice, this list of conditions and the following disclaimer",
"v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z,",
"0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"\"\"\" import unittest import numpy as np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0,",
"COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,",
"of the Raysect Project nor the names of its # contributors may be",
"OF THE # POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit tests for the Vector3D",
"this list of conditions and the following disclaimer. # # 2. Redistributions in",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y,",
"and the following disclaimer in the # documentation and/or other materials provided with",
"v3y, v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"v4y, v4z, 0.5, 0.5, 0)) # check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x,",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z,",
"forms, with or without # modification, are permitted provided that the following conditions",
"# POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit tests for the Vector3D object. \"\"\"",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x,",
"v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\" # defining triangle vertices v1x, v1y, v1z",
"import unittest import numpy as np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5,",
"in source and binary forms, with or without # modification, are permitted provided",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25))",
"the inside tetrahedra algorithm.\"\"\" # defining triangle vertices v1x, v1y, v1z = 0,",
"triangle vertices v1x, v1y, v1z = 0, 0, 0 v2x, v2y, v2z =",
"v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"_test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\" #",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) #",
"# # Redistribution and use in source and binary forms, with or without",
"v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF",
"v4y, v4z)) # check line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"other materials provided with the distribution. # # 3. Neither the name of",
"v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001))",
"name of the Raysect Project nor the names of its # contributors may",
"in the # documentation and/or other materials provided with the distribution. # #",
"v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"used to endorse or promote products derived from # this software without specific",
"the Vector3D object. \"\"\" import unittest import numpy as np # from raysect.core.math.cython.tetrahedra",
"v4x, v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,",
"2014-2021, Dr <NAME>, Raysect Project # All rights reserved. # # Redistribution and",
"v4z, 0.5, 0.5, 0)) # check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"= 0, 0, 0 v2x, v2y, v2z = 1, 0, 0 v3x, v3y,",
"v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5))",
"promote products derived from # this software without specific prior written permission. #",
"prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"0, 0 v2x, v2y, v2z = 1, 0, 0 v3x, v3y, v3z =",
"0, 1, 0 v4x, v4y, v4z = 0, 0, 1 # test vertices",
"0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED.",
"# 1. Redistributions of source code must retain the above copyright notice, #",
"OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR",
"line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) #",
"v3z = 0, 1, 0 v4x, v4y, v4z = 0, 0, 1 #",
"# Copyright (c) 2014-2021, Dr <NAME>, Raysect Project # All rights reserved. #",
"vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE",
"OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) # check an",
"list of conditions and the following disclaimer. # # 2. Redistributions in binary",
"_test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self):",
"v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.01, 0.5,",
"-0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT",
"ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING",
"v2z = 1, 0, 0 v3x, v3y, v3z = 0, 1, 0 v4x,",
"use in source and binary forms, with or without # modification, are permitted",
"in binary form must reproduce the above copyright # notice, this list of",
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z,",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x,",
"v3z, v4x, v4y, v4z, v4x, v4y, v4z)) # check line segments are inside",
"# All rights reserved. # # Redistribution and use in source and binary",
"# this list of conditions and the following disclaimer. # # 2. Redistributions",
"check line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"0 v2x, v2y, v2z = 1, 0, 0 v3x, v3y, v3z = 0,",
"1. Redistributions of source code must retain the above copyright notice, # this",
"to endorse or promote products derived from # this software without specific prior",
"v4x, v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN",
"-0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5,",
"v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"v4x, v4y, v4z, v4x, v4y, v4z)) # check line segments are inside self.assertTrue(inside_tetrahedra(v1x,",
"v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"= 1, 0, 0 v3x, v3y, v3z = 0, 1, 0 v4x, v4y,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y,",
"0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF",
"1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z))",
"GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION)",
"v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED",
"AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT",
"products derived from # this software without specific prior written permission. # #",
"# from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra",
"# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #",
"CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY",
"# 3. Neither the name of the Raysect Project nor the names of",
"A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER",
"v2x, v2y, v2z = 1, 0, 0 v3x, v3y, v3z = 0, 1,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z))",
"OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF",
"from # this software without specific prior written permission. # # THIS SOFTWARE",
"v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5,",
"v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0))",
"of conditions and the following disclaimer. # # 2. Redistributions in binary form",
"2. Redistributions in binary form must reproduce the above copyright # notice, this",
"be used to endorse or promote products derived from # this software without",
"must retain the above copyright notice, # this list of conditions and the",
"IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,",
"v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"tests for the Vector3D object. \"\"\" import unittest import numpy as np #",
"ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF",
"the Raysect Project nor the names of its # contributors may be used",
"= 0, 1, 0 v4x, v4y, v4z = 0, 0, 1 # test",
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\" # defining triangle vertices v1x,",
"v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL",
"SUCH DAMAGE. \"\"\" Unit tests for the Vector3D object. \"\"\" import unittest import",
"reproduce the above copyright # notice, this list of conditions and the following",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5,",
"0, 1 # test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"the above copyright notice, # this list of conditions and the following disclaimer.",
"<NAME>, Raysect Project # All rights reserved. # # Redistribution and use in",
"defining triangle vertices v1x, v1y, v1z = 0, 0, 0 v2x, v2y, v2z",
"v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"object. \"\"\" import unittest import numpy as np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra",
"and binary forms, with or without # modification, are permitted provided that the",
"names of its # contributors may be used to endorse or promote products",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND",
"Vector3D object. \"\"\" import unittest import numpy as np # from raysect.core.math.cython.tetrahedra import",
"raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase):",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333,",
"SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT,",
"v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) # check an exterior point",
"v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"following disclaimer in the # documentation and/or other materials provided with the distribution.",
"3. Neither the name of the Raysect Project nor the names of its",
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED",
"as np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra",
"0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"1, 0, 0 v3x, v3y, v3z = 0, 1, 0 v4x, v4y, v4z",
"WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND",
"as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests",
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE",
"OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR",
"OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,",
"v4x, v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"of conditions and the following disclaimer in the # documentation and/or other materials",
"LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) # check",
"# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES",
"raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) # check an",
"OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON",
"and the following disclaimer. # # 2. Redistributions in binary form must reproduce",
"0)) # check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y,",
"1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, -0.01,",
"v1z = 0, 0, 0 v2x, v2y, v2z = 1, 0, 0 v3x,",
"self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x,",
"LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"v4z, 0.25, 0.25, 0.25)) # check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"# defining triangle vertices v1x, v1y, v1z = 0, 0, 0 v2x, v2y,",
"v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"that the following conditions are met: # # 1. Redistributions of source code",
"DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES;",
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND",
"from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from raysect.core.math.cython.tetrahedra import _test_barycentric_inside_tetrahedra as inside_tetrahedra class",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x,",
"-0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"check an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z,",
"code must retain the above copyright notice, # this list of conditions and",
"v4z, v4x, v4y, v4z)) # check line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"the following conditions are met: # # 1. Redistributions of source code must",
"THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z))",
"OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS",
"THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE",
"0, 0, 0 v2x, v2y, v2z = 1, 0, 0 v3x, v3y, v3z",
"an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v1y, v1z = 0, 0, 0 v2x, v2y, v2z = 1, 0, 0",
"0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit tests for",
"import _test_barycentric_inside_tetrahedra as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\"",
"# check line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x,",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335)) if __name__ ==",
"THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. \"\"\"",
"v3x, v3y, v3z = 0, 1, 0 v4x, v4y, v4z = 0, 0,",
"v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF",
"the following disclaimer in the # documentation and/or other materials provided with the",
"# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS",
"must reproduce the above copyright # notice, this list of conditions and the",
"Raysect Project # All rights reserved. # # Redistribution and use in source",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, -0.5, -0.5, -0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z,",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x,",
"self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x,",
"OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER",
"= 0, 0, 1 # test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"",
"0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333,",
"ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit tests for the",
"# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE",
"interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"tetrahedra algorithm.\"\"\" # defining triangle vertices v1x, v1y, v1z = 0, 0, 0",
"list of conditions and the following disclaimer in the # documentation and/or other",
"THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED",
"v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"the names of its # contributors may be used to endorse or promote",
"v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y,",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x,",
"v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) # check an interior point self.assertTrue(inside_tetrahedra(v1x,",
"as inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\" # defining",
"TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE",
"v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY",
"STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY",
"reserved. # # Redistribution and use in source and binary forms, with or",
"0.25, 0.25)) # check an exterior point self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED",
"# Redistribution and use in source and binary forms, with or without #",
"INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS",
"segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"v3z, v4x, v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE",
"nor the names of its # contributors may be used to endorse or",
"FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT",
"0 v4x, v4y, v4z = 0, 0, 1 # test vertices are inside",
"v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335))",
"are permitted provided that the following conditions are met: # # 1. Redistributions",
"BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR",
"v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER",
"0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"v4x, v4y, v4z = 0, 0, 1 # test vertices are inside self.assertTrue(inside_tetrahedra(v1x,",
"the following disclaimer. # # 2. Redistributions in binary form must reproduce the",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, -0.01)) self.assertFalse(inside_tetrahedra(v1x,",
"binary forms, with or without # modification, are permitted provided that the following",
"v4y, v4z, -0.01, 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"inside_tetrahedra class TestTetrahedra(unittest.TestCase): def test_inside_tetrahedra(self): \"\"\"Tests the inside tetrahedra algorithm.\"\"\" # defining triangle",
"v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335)) if __name__ == \"__main__\":",
"OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #",
"unittest import numpy as np # from raysect.core.math.cython.tetrahedra import _test_inside_tetrahedra as inside_tetrahedra from",
"# 2. Redistributions in binary form must reproduce the above copyright # notice,",
"v3y, v3z, v4x, v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY",
"may be used to endorse or promote products derived from # this software",
"SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. \"\"\" Unit",
"v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) # check line segments are",
"v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z,",
"# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS",
"v4y, v4z, 0.5, -0.01, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y,",
"v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y,",
"an interior point self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x,",
"USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH",
"CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY,",
"the name of the Raysect Project nor the names of its # contributors",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x,",
"Unit tests for the Vector3D object. \"\"\" import unittest import numpy as np",
"FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE",
"PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR",
"self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25,",
"v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,",
"materials provided with the distribution. # # 3. Neither the name of the",
"v4x, v4y, v4z)) # check line segments are inside self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x,",
"v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5))",
"v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y,",
"0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z,",
"# modification, are permitted provided that the following conditions are met: # #",
"written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND",
"v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335)) if __name__ == \"__main__\": unittest.main()",
"v4y, v4z = 0, 0, 1 # test vertices are inside self.assertTrue(inside_tetrahedra(v1x, v1y,",
"source code must retain the above copyright notice, # this list of conditions",
"OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF",
"v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x,",
"copyright # notice, this list of conditions and the following disclaimer in the",
"Project # All rights reserved. # # Redistribution and use in source and",
"or promote products derived from # this software without specific prior written permission.",
"v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y,",
"above copyright notice, # this list of conditions and the following disclaimer. #",
"specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT",
"v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z,"
] |
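The test above only exercises _test_barycentric_inside_tetrahedra through its Cython wrapper. As a reading aid, here is a minimal pure-Python sketch of the same barycentric idea, assuming the convention (implied by the face and vertex assertions above) that boundary points count as inside. This is an illustration, not Raysect's implementation, and the function name barycentric_inside_tetrahedra is hypothetical:

import numpy as np

def barycentric_inside_tetrahedra(v1, v2, v3, v4, p):
    """Return True when point p lies inside or on the tetrahedron v1-v2-v3-v4."""
    # Solve [v2-v1 | v3-v1 | v4-v1] @ (b2, b3, b4) = p - v1 for the barycentric
    # weights of v2, v3 and v4; the weight of v1 is the remainder to 1.
    m = np.column_stack((np.subtract(v2, v1), np.subtract(v3, v1), np.subtract(v4, v1)))
    b2, b3, b4 = np.linalg.solve(m, np.subtract(p, v1))
    b1 = 1.0 - b2 - b3 - b4
    # Inside (or on the surface) exactly when all four weights are non-negative.
    return min(b1, b2, b3, b4) >= 0.0

For the unit tetrahedron used in the test, m is the identity matrix, so for example the query point (0.5, 0.5, 0.0001) gives b1 = -0.0001 and therefore False, matching the corresponding assertFalse above, while the face point (0.5, 0.5, 0) gives b1 = 0 and is accepted.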
# melahi/my-tensorflow-layers

import numpy as np
import tensorflow as tf
from tensorflow.python.keras import testing_utils, backend
from tensorflow.python.keras.utils import get_custom_objects
from tensorflow.python.platform import test

from relation import Relation


class RelationTest(test.TestCase):

    @staticmethod
    def test_relation_layer():
        backend.set_session(None)
        input_data = np.array([[[3, 2, 4], [1, 5, 2]],
                               [[30, 20, 40], [10, 50, 20]]], dtype=np.float32)
        weights = np.array([[1, 0], [5, 6], [7, 8]], dtype=np.float32)
        bias = np.array([4, 7], dtype=np.float32)
        expected_output = np.array([[[6926, 8642], [6845, 8822]],
                                    [[663440, 807500], [655340, 825500]]], dtype=np.float32)
        tf.reset_default_graph()
        get_custom_objects()['Relation'] = Relation
        kwargs = {'relations': 2,
                  'kernel_initializer': tf.constant_initializer(weights),
                  'bias_initializer': tf.constant_initializer(bias)}
        a = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)  # note: unused in this test
        output = testing_utils.layer_test(Relation, kwargs=kwargs, input_data=input_data,
                                          expected_output=expected_output)
        if not np.array_equal(output, expected_output):
            raise AssertionError('The output is not equal to our expected output')

    @staticmethod
    def test_my_case():
        backend.set_session(None)
        input_data = np.array([[[15, 0, 10], [13, 1, 10], [13, 5, 19], [19, 19, 4]],
                               [[5, 14, 10], [9, 11, 12], [4, 7, 7], [1, 9, 0]],
                               [[14, 17, 1], [1, 9, 16], [7, 6, 9], [17, 7, 3]]], dtype=np.float32)
        w1 = np.array([[3, 8, 6, 7, 8], [3, 1, 0, 8, 7], [4, 9, 8, 1, 9]], dtype=np.float32)
        w2 = np.array([[3, 0, 0, 2, 3], [3, 8, 9, 5, 7], [3, 9, 7, 0, 7]], dtype=np.float32)
        g = np.array([[7, 9, 4, 9, 0], [1, 1, 5, 4, 0], [6, 1, 7, 1, 3]], dtype=np.float32)
        bias = np.array([0, 0, 0, 0, 0], dtype=np.float32)
        expected_output = np.array([[[128560, 219497, 209334, 128295, 48435],
                                     [126451, 210377, 201342, 124197, 47274],
                                     [160195, 262057, 249294, 152200, 61335],
                                     [160195, 217673, 193350, 247137, 62754]],
                                    [[61893, 76272, 131794, 81197, 34404],
                                     [65721, 87599, 151154, 83381, 36927],
                                     [49365, 66150, 117274, 57173, 27096],
                                     [38577, 45665, 81458, 52805, 20745]],
                                    [[86352, 109637, 130294, 167782, 34419],
                                     [84894, 119274, 153562, 92742, 32244],
                                     [75660, 111732, 142482, 98638, 29112],
                                     [80034, 123045, 149130, 137230, 31983]]], dtype=np.float32)
        kwargs = {'relations': 5}
        layer_cls = Relation(**kwargs)
        my_i = tf.keras.layers.Input(input_data.shape[1:], dtype=tf.float32)
        my_layer = layer_cls(my_i)
        model = tf.keras.Model(my_i, my_layer)
        weights = [w1, w2, g, bias]
        layer_cls.set_weights(weights)
        output = model.predict(input_data)
        if not np.array_equal(output, expected_output):
            raise AssertionError("The output is not equal with the expected output")


if __name__ == '__main__':
    test.main()
"137230, 31983]]], dtype=np.float32) kwargs = {'relations': 5} layer_cls = Relation(**kwargs) my_i = tf.keras.layers.Input(input_data.shape[1:],",
"= tf.keras.Model(my_i, my_layer) weights = [w1, w2, g, bias] layer_cls.set_weights(weights) output = model.predict(input_data)",
"= np.array([[3, 0, 0, 2, 3], [3, 8, 9, 5, 7], [3, 9,",
"tensorflow.python.keras import testing_utils, backend from tensorflow.python.keras.utils import get_custom_objects from tensorflow.python.platform import test from",
"0, 0], dtype=np.float32) expected_output = np.array([[[128560, 219497, 209334, 128295, 48435], [126451, 210377, 201342,",
"87599, 151154, 83381, 36927], [49365, 66150, 117274, 57173, 27096], [38577, 45665, 81458, 52805,",
"from tensorflow.python.keras import testing_utils, backend from tensorflow.python.keras.utils import get_custom_objects from tensorflow.python.platform import test",
"10], [9, 11, 12], [4, 7, 7], [1, 9, 0]], [[14, 17, 1],",
"7, 1, 3]], dtype=np.float32) bias = np.array([0, 0, 0, 0, 0], dtype=np.float32) expected_output",
"123045, 149130, 137230, 31983]]], dtype=np.float32) kwargs = {'relations': 5} layer_cls = Relation(**kwargs) my_i",
"20, 40], [10, 50, 20]]], dtype=np.float32) weights = np.array([[1, 0], [5, 6], [7,",
"20745]], [[86352, 109637, 130294, 167782, 34419], [84894, 119274, 153562, 92742, 32244], [75660, 111732,",
"bias] layer_cls.set_weights(weights) output = model.predict(input_data) if not np.array_equal(output, expected_output): raise AssertionError(\"The output is",
"50, 20]]], dtype=np.float32) weights = np.array([[1, 0], [5, 6], [7, 8]], dtype=np.float32) bias",
"get_custom_objects()['Relation'] = Relation kwargs = {'relations': 2, 'kernel_initializer': tf.constant_initializer(weights), 'bias_initializer': tf.constant_initializer(bias) } a",
"9, 7, 0, 7]], dtype=np.float32) g = np.array([[7, 9, 4, 9, 0], [1,",
"not equal to our expected output') @staticmethod def test_my_case(): backend.set_session(None) input_data = np.array([[[15,",
"model.predict(input_data) if not np.array_equal(output, expected_output): raise AssertionError(\"The output is not equal with the",
"8, 9, 5, 7], [3, 9, 7, 0, 7]], dtype=np.float32) g = np.array([[7,",
"0, 0, 2, 3], [3, 8, 9, 5, 7], [3, 9, 7, 0,",
"128295, 48435], [126451, 210377, 201342, 124197, 47274], [160195, 262057, 249294, 152200, 61335], [160195,",
"= np.array([0, 0, 0, 0, 0], dtype=np.float32) expected_output = np.array([[[128560, 219497, 209334, 128295,",
"= np.array([[3, 8, 6, 7, 8], [3, 1, 0, 8, 7], [4, 9,",
"1], [1, 9, 16], [7, 6, 9], [17, 7, 3]]], dtype=np.float32) w1 =",
"0, 10], [13, 1, 10], [13, 5, 19], [19, 19, 4]], [[5, 14,",
"import numpy as np import tensorflow as tf from tensorflow.python.keras import testing_utils, backend",
"[1, 1, 5, 4, 0], [6, 1, 7, 1, 3]], dtype=np.float32) bias =",
"dtype=np.float32) w1 = np.array([[3, 8, 6, 7, 8], [3, 1, 0, 8, 7],",
"test_my_case(): backend.set_session(None) input_data = np.array([[[15, 0, 10], [13, 1, 10], [13, 5, 19],",
"4], [1, 5, 2]], [[30, 20, 40], [10, 50, 20]]], dtype=np.float32) weights =",
"from relation import Relation class RelationTest(test.TestCase): @staticmethod def test_relation_layer(): backend.set_session(None) input_data = np.array([[[3,",
"124197, 47274], [160195, 262057, 249294, 152200, 61335], [160195, 217673, 193350, 247137, 62754]], [[61893,",
"[4, 7, 7], [1, 9, 0]], [[14, 17, 1], [1, 9, 16], [7,",
"0], [1, 1, 5, 4, 0], [6, 1, 7, 1, 3]], dtype=np.float32) bias",
"[3, 1, 0, 8, 7], [4, 9, 8, 1, 9]], dtype=np.float32) w2 =",
"7, 7], [1, 9, 0]], [[14, 17, 1], [1, 9, 16], [7, 6,",
"testing_utils.layer_test(Relation, kwargs=kwargs, input_data=input_data, expected_output=expected_output) if not np.array_equal(output, expected_output): raise AssertionError('The output is not",
"np.array_equal(output, expected_output): raise AssertionError('The output is not equal to our expected output') @staticmethod",
"np.array([[3, 8, 6, 7, 8], [3, 1, 0, 8, 7], [4, 9, 8,",
"27096], [38577, 45665, 81458, 52805, 20745]], [[86352, 109637, 130294, 167782, 34419], [84894, 119274,",
"29112], [80034, 123045, 149130, 137230, 31983]]], dtype=np.float32) kwargs = {'relations': 5} layer_cls =",
"[1, 9, 0]], [[14, 17, 1], [1, 9, 16], [7, 6, 9], [17,",
"217673, 193350, 247137, 62754]], [[61893, 76272, 131794, 81197, 34404], [65721, 87599, 151154, 83381,",
"[6, 1, 7, 1, 3]], dtype=np.float32) bias = np.array([0, 0, 0, 0, 0],",
"1, 10], [13, 5, 19], [19, 19, 4]], [[5, 14, 10], [9, 11,",
"19, 4]], [[5, 14, 10], [9, 11, 12], [4, 7, 7], [1, 9,",
"input_data = np.array([[[15, 0, 10], [13, 1, 10], [13, 5, 19], [19, 19,",
"5, 2]], [[30, 20, 40], [10, 50, 20]]], dtype=np.float32) weights = np.array([[1, 0],",
"19], [19, 19, 4]], [[5, 14, 10], [9, 11, 12], [4, 7, 7],",
"7, 8], [3, 1, 0, 8, 7], [4, 9, 8, 1, 9]], dtype=np.float32)",
"210377, 201342, 124197, 47274], [160195, 262057, 249294, 152200, 61335], [160195, 217673, 193350, 247137,",
"weights = np.array([[1, 0], [5, 6], [7, 8]], dtype=np.float32) bias = np.array([4, 7],",
"[75660, 111732, 142482, 98638, 29112], [80034, 123045, 149130, 137230, 31983]]], dtype=np.float32) kwargs =",
"52805, 20745]], [[86352, 109637, 130294, 167782, 34419], [84894, 119274, 153562, 92742, 32244], [75660,",
"from tensorflow.python.platform import test from relation import Relation class RelationTest(test.TestCase): @staticmethod def test_relation_layer():",
"g = np.array([[7, 9, 4, 9, 0], [1, 1, 5, 4, 0], [6,",
"119274, 153562, 92742, 32244], [75660, 111732, 142482, 98638, 29112], [80034, 123045, 149130, 137230,",
"dtype=np.float32) g = np.array([[7, 9, 4, 9, 0], [1, 1, 5, 4, 0],",
"= {'relations': 5} layer_cls = Relation(**kwargs) my_i = tf.keras.layers.Input(input_data.shape[1:], dtype=tf.float32) my_layer = layer_cls(my_i)",
"tf.constant_initializer(weights), 'bias_initializer': tf.constant_initializer(bias) } a = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) output = testing_utils.layer_test(Relation, kwargs=kwargs, input_data=input_data, expected_output=expected_output)",
"import Relation class RelationTest(test.TestCase): @staticmethod def test_relation_layer(): backend.set_session(None) input_data = np.array([[[3, 2, 4],",
"0, 0, 0], dtype=np.float32) expected_output = np.array([[[128560, 219497, 209334, 128295, 48435], [126451, 210377,",
"} a = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) output = testing_utils.layer_test(Relation, kwargs=kwargs, input_data=input_data, expected_output=expected_output) if not np.array_equal(output,",
"7], [1, 9, 0]], [[14, 17, 1], [1, 9, 16], [7, 6, 9],",
"4, 9, 0], [1, 1, 5, 4, 0], [6, 1, 7, 1, 3]],",
"input_data = np.array([[[3, 2, 4], [1, 5, 2]], [[30, 20, 40], [10, 50,",
"@staticmethod def test_my_case(): backend.set_session(None) input_data = np.array([[[15, 0, 10], [13, 1, 10], [13,",
"np.array([[[6926, 8642], [6845, 8822]], [[663440, 807500], [655340, 825500]]], dtype=np.float32) tf.reset_default_graph() get_custom_objects()['Relation'] = Relation",
"[3, 9, 7, 0, 7]], dtype=np.float32) g = np.array([[7, 9, 4, 9, 0],",
"backend from tensorflow.python.keras.utils import get_custom_objects from tensorflow.python.platform import test from relation import Relation",
"40], [10, 50, 20]]], dtype=np.float32) weights = np.array([[1, 0], [5, 6], [7, 8]],",
"= np.array([[[6926, 8642], [6845, 8822]], [[663440, 807500], [655340, 825500]]], dtype=np.float32) tf.reset_default_graph() get_custom_objects()['Relation'] =",
"= np.array([[[128560, 219497, 209334, 128295, 48435], [126451, 210377, 201342, 124197, 47274], [160195, 262057,",
"201342, 124197, 47274], [160195, 262057, 249294, 152200, 61335], [160195, 217673, 193350, 247137, 62754]],",
"[126451, 210377, 201342, 124197, 47274], [160195, 262057, 249294, 152200, 61335], [160195, 217673, 193350,",
"Relation class RelationTest(test.TestCase): @staticmethod def test_relation_layer(): backend.set_session(None) input_data = np.array([[[3, 2, 4], [1,",
"[3, 8, 9, 5, 7], [3, 9, 7, 0, 7]], dtype=np.float32) g =",
"5, 19], [19, 19, 4]], [[5, 14, 10], [9, 11, 12], [4, 7,",
"0], dtype=np.float32) expected_output = np.array([[[128560, 219497, 209334, 128295, 48435], [126451, 210377, 201342, 124197,",
"32244], [75660, 111732, 142482, 98638, 29112], [80034, 123045, 149130, 137230, 31983]]], dtype=np.float32) kwargs",
"16], [7, 6, 9], [17, 7, 3]]], dtype=np.float32) w1 = np.array([[3, 8, 6,",
"[65721, 87599, 151154, 83381, 36927], [49365, 66150, 117274, 57173, 27096], [38577, 45665, 81458,",
"5, 4, 0], [6, 1, 7, 1, 3]], dtype=np.float32) bias = np.array([0, 0,",
"output is not equal with the expected output\") if __name__ == '__main__': test.main()",
"[7, 8]], dtype=np.float32) bias = np.array([4, 7], dtype=np.float32) expected_output = np.array([[[6926, 8642], [6845,",
"76272, 131794, 81197, 34404], [65721, 87599, 151154, 83381, 36927], [49365, 66150, 117274, 57173,",
"2, 'kernel_initializer': tf.constant_initializer(weights), 'bias_initializer': tf.constant_initializer(bias) } a = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) output = testing_utils.layer_test(Relation, kwargs=kwargs,",
"9, 0], [1, 1, 5, 4, 0], [6, 1, 7, 1, 3]], dtype=np.float32)",
"test from relation import Relation class RelationTest(test.TestCase): @staticmethod def test_relation_layer(): backend.set_session(None) input_data =",
"0], [6, 1, 7, 1, 3]], dtype=np.float32) bias = np.array([0, 0, 0, 0,",
"np.array([[7, 9, 4, 9, 0], [1, 1, 5, 4, 0], [6, 1, 7,",
"[[30, 20, 40], [10, 50, 20]]], dtype=np.float32) weights = np.array([[1, 0], [5, 6],",
"7], dtype=np.float32) expected_output = np.array([[[6926, 8642], [6845, 8822]], [[663440, 807500], [655340, 825500]]], dtype=np.float32)",
"import tensorflow as tf from tensorflow.python.keras import testing_utils, backend from tensorflow.python.keras.utils import get_custom_objects",
"4]], [[5, 14, 10], [9, 11, 12], [4, 7, 7], [1, 9, 0]],",
"to our expected output') @staticmethod def test_my_case(): backend.set_session(None) input_data = np.array([[[15, 0, 10],",
"[160195, 217673, 193350, 247137, 62754]], [[61893, 76272, 131794, 81197, 34404], [65721, 87599, 151154,",
"9, 8, 1, 9]], dtype=np.float32) w2 = np.array([[3, 0, 0, 2, 3], [3,",
"807500], [655340, 825500]]], dtype=np.float32) tf.reset_default_graph() get_custom_objects()['Relation'] = Relation kwargs = {'relations': 2, 'kernel_initializer':",
"np.array([4, 7], dtype=np.float32) expected_output = np.array([[[6926, 8642], [6845, 8822]], [[663440, 807500], [655340, 825500]]],",
"g, bias] layer_cls.set_weights(weights) output = model.predict(input_data) if not np.array_equal(output, expected_output): raise AssertionError(\"The output",
"0], [5, 6], [7, 8]], dtype=np.float32) bias = np.array([4, 7], dtype=np.float32) expected_output =",
"np.array([[[128560, 219497, 209334, 128295, 48435], [126451, 210377, 201342, 124197, 47274], [160195, 262057, 249294,",
"as tf from tensorflow.python.keras import testing_utils, backend from tensorflow.python.keras.utils import get_custom_objects from tensorflow.python.platform",
"2, 3], [3, 8, 9, 5, 7], [3, 9, 7, 0, 7]], dtype=np.float32)",
"111732, 142482, 98638, 29112], [80034, 123045, 149130, 137230, 31983]]], dtype=np.float32) kwargs = {'relations':",
"my_i = tf.keras.layers.Input(input_data.shape[1:], dtype=tf.float32) my_layer = layer_cls(my_i) model = tf.keras.Model(my_i, my_layer) weights =",
"= np.array([[[3, 2, 4], [1, 5, 2]], [[30, 20, 40], [10, 50, 20]]],",
"[7, 6, 9], [17, 7, 3]]], dtype=np.float32) w1 = np.array([[3, 8, 6, 7,",
"7, 3]]], dtype=np.float32) w1 = np.array([[3, 8, 6, 7, 8], [3, 1, 0,",
"209334, 128295, 48435], [126451, 210377, 201342, 124197, 47274], [160195, 262057, 249294, 152200, 61335],",
"219497, 209334, 128295, 48435], [126451, 210377, 201342, 124197, 47274], [160195, 262057, 249294, 152200,"
] |
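The test registers the custom class under get_custom_objects() so that Keras can resolve the layer by name during (de)serialization, which testing_utils.layer_test exercises as part of its round trip. A minimal sketch of the same mechanism outside the test harness; the saved-model filename is hypothetical, not part of the original tests:

# Sketch: reloading a saved model that contains the custom Relation layer.
# 'relation_model.h5' is a hypothetical path.
import tensorflow as tf
from tensorflow.python.keras.utils import get_custom_objects
from relation import Relation

get_custom_objects()['Relation'] = Relation  # let Keras resolve 'Relation' by name
model = tf.keras.models.load_model('relation_model.h5')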
# Django comment model, reconstructed from the overlapping fragments above.
# English glosses for the Chinese verbose names are given as comments.
from django.db import models
from django.contrib.auth.models import User
from blog.models import Post


class Comment(models.Model):
    STATUS_ITEMS = (
        (1, '正常'),  # normal
        (2, '删除'),  # deleted
    )
    target = models.CharField(max_length=200, null=True, verbose_name='评论目标')  # comment target
    content = models.CharField(max_length=2000, verbose_name='内容')  # content
    nickname = models.CharField(max_length=50, verbose_name='别名')  # nickname
    status = models.PositiveIntegerField(default=1, choices=STATUS_ITEMS,
                                         verbose_name='状态')  # status
    websit = models.URLField(verbose_name='网址')  # website URL
    email = models.EmailField(verbose_name='邮箱')  # email
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')  # created time

    class Meta:
        verbose_name = verbose_name_plural = '评论'  # "comment"

    def __str__(self):
        return '{}'.format(self.target)

    def nickname_show(self):
        return '来自{}的评论'.format(self.nickname)  # "comment from {nickname}"
    nickname_show.short_description = '评论者'  # "commenter"
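For context, a short sketch of how this model is used through the Django ORM; the import path and field values are illustrative assumptions, not from the source:

# Hypothetical usage of the Comment model (app path and values are made up).
from comment.models import Comment

comment = Comment.objects.create(
    target='/post/1/',
    content='Nice article!',
    nickname='alice',
    websit='https://example.com',  # field is spelled 'websit' in the model
    email='alice@example.com',
)
visible = Comment.objects.filter(status=1)  # 1 = '正常' (normal)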
# Rotated ROI Align wrapper (mmdet custom op), reconstructed from the
# overlapping fragments above. The unbalanced format string in __repr__
# has been closed.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from mmdet import _Custom as _C
from apex import amp


class _RROIAlign(Function):
    @staticmethod
    def forward(ctx, features, rois, out_size, spatial_scale, sample_num=0):
        out_h, out_w = _pair(out_size)
        assert isinstance(out_h, int) and isinstance(out_w, int)
        ctx.spatial_scale = spatial_scale
        ctx.sample_num = sample_num
        ctx.save_for_backward(rois)
        ctx.feature_size = features.size()
        output = _C.rotate_roi_align_forward(
            features, rois, spatial_scale, out_h, out_w, sample_num
        )
        # return output, argmax  # DEBUG ONLY
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        feature_size = ctx.feature_size
        spatial_scale = ctx.spatial_scale
        sample_num = ctx.sample_num
        rois = ctx.saved_tensors[0]
        assert (feature_size is not None and grad_output.is_cuda)
        batch_size, num_channels, data_height, data_width = feature_size
        out_w = grad_output.size(3)
        out_h = grad_output.size(2)
        grad_input = grad_rois = None
        if ctx.needs_input_grad[0]:
            grad_input = _C.rotate_roi_align_backward(
                grad_output.contiguous(), rois, spatial_scale, out_h, out_w,
                batch_size, num_channels, data_height, data_width, sample_num
            )
        return grad_input, grad_rois, None, None, None


rroi_align = _RROIAlign.apply


class RROIAlign(nn.Module):
    def __init__(self, out_size, spatial_scale, sample_num=0):
        super(RROIAlign, self).__init__()
        self.out_size = out_size
        self.spatial_scale = spatial_scale
        self.sample_num = sample_num

    @amp.float_function
    def forward(self, features, rois):
        return rroi_align(
            features, rois, self.out_size, self.spatial_scale, self.sample_num
        )

    def __repr__(self):
        format_str = self.__class__.__name__
        format_str += '(out_size={}, spatial_scale={}, sample_num={})'.format(
            self.out_size, self.spatial_scale, self.sample_num)
        return format_str
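As rough orientation, a hedged usage sketch: the wrapper behaves like any nn.Module and delegates to the compiled _Custom CUDA kernels. The rois column layout shown (batch index plus a rotated box) is an assumption, since only the kernel entry points are visible above:

# Hypothetical usage (requires CUDA and the compiled mmdet._Custom extension).
# The rois layout (batch_idx, cx, cy, w, h, angle) is an assumed convention.
import torch

layer = RROIAlign(out_size=7, spatial_scale=1.0 / 16, sample_num=2)
features = torch.randn(2, 256, 50, 50, device='cuda')
rois = torch.tensor([[0.0, 100.0, 120.0, 64.0, 32.0, 0.3]], device='cuda')
pooled = layer(features, rois)  # expected shape: (num_rois, 256, 7, 7)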
# Hamiltonian dynamics predictor (lie_conv), reconstructed from the
# overlapping fragments above; the indentation of the debug branch is
# inferred from the linear fragment order.
from torch import nn
from attrdict import AttrDict
import torch
import torch.nn.functional as F
from lie_conv.dynamicsTrainer import Partial
from torchdiffeq import odeint
from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH


class DynamicsPredictor(nn.Module):
    """This class implements forward pass through our model, including loss computation."""

    def __init__(self, predictor, debug=False, task="spring", model_with_dict=True):
        super().__init__()
        self.predictor = predictor
        self.debug = debug
        self.task = task
        self.model_with_dict = model_with_dict
        if self.debug:
            print("DynamicsPredictor is in DEBUG MODE.")

    def _rollout_model(self, z0, ts, sys_params, tol=1e-4):
        """inputs [z0: (bs, z_dim), ts: (bs, T), sys_params: (bs, n, c)]
        outputs pred_zs: (bs, T, z_dim)"""
        dynamics = Partial(self.predictor, sysP=sys_params)
        zs = odeint(dynamics, z0, ts[0], rtol=tol, method="rk4")
        return zs.permute(1, 0, 2)

    def forward(self, data):
        o = AttrDict()
        (z0, sys_params, ts), true_zs = data
        pred_zs = self._rollout_model(z0, ts, sys_params)
        mse = (pred_zs - true_zs).pow(2).mean()

        if self.debug:
            if self.task == "spring":
                # currently a bit inefficient to do the below?
                with torch.no_grad():
                    (z0, sys_params, ts), true_zs = data
                    z = z0
                    m = sys_params[..., 0]  # assume the first component encodes masses
                    D = z.shape[-1]  # of ODE dims, 2*num_particles*space_dim
                    q = z[:, : D // 2].reshape(*m.shape, -1)
                    p = z[:, D // 2:].reshape(*m.shape, -1)
                    V_pred = self.predictor.compute_V((q, sys_params))
                    k = sys_params[..., 1]
                    V_true = SpringV(q, k)
                    mse_V = (V_pred - V_true).pow(2).mean()
                    # dynamics
                    dyn_tz_pred = self.predictor(ts, z0, sys_params)
                    H = lambda t, z: SpringH(
                        z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1)
                    )
                    dynamics = HamiltonianDynamics(H, wgrad=False)
                    dyn_tz_true = dynamics(ts, z0)
                    mse_dyn = (dyn_tz_true - dyn_tz_pred).pow(2).mean()
            o.mse_dyn = mse_dyn
            o.mse_V = mse_V

        o.prediction = pred_zs
        o.mse = mse
        o.loss = mse  # loss wrt which we train the model
        if self.debug:
            o.reports = AttrDict({"mse": o.mse, "mse_V": o.mse_V, "mse_dyn": o.mse_dyn})
        else:
            o.reports = AttrDict({"mse": o.mse})
        if not self.model_with_dict:
            return pred_zs
        return o
"including loss computation.\"\"\" def __init__(self, predictor, debug=False, task=\"spring\", model_with_dict=True): super().__init__() self.predictor = predictor",
"in DEBUG MODE.\") def _rollout_model(self, z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0: (bs, z_dim),",
"zs = odeint(dynamics, z0, ts[0], rtol=tol, method=\"rk4\") return zs.permute(1, 0, 2) def forward(self,",
"sys_params) mse = (pred_zs - true_zs).pow(2).mean() if self.debug: if self.task == \"spring\": #",
"= mse # loss wrt which we train the model if self.debug: o.reports",
"loss wrt which we train the model if self.debug: o.reports = AttrDict({\"mse\": o.mse,",
"torch.nn.functional as F from lie_conv.dynamicsTrainer import Partial from torchdiffeq import odeint from lie_conv.hamiltonian",
"SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH class DynamicsPredictor(nn.Module): \"\"\"This class implements forward pass through",
"assume the first component encodes masses D = z.shape[-1] # of ODE dims,",
"ts), true_zs = data z = z0 m = sys_params[..., 0] # assume",
"2].reshape(*m.shape, -1) p = z[:, D // 2 :].reshape(*m.shape, -1) V_pred = self.predictor.compute_V((q,",
"D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim q = z[:, : D",
"= model_with_dict if self.debug: print(\"DynamicsPredictor is in DEBUG MODE.\") def _rollout_model(self, z0, ts,",
"dynamics = HamiltonianDynamics(H, wgrad=False) dyn_tz_true = dynamics(ts, z0) mse_dyn = (dyn_tz_true - dyn_tz_pred).pow(2).mean()",
"mse_V = (V_pred - V_true).pow(2).mean() # dynamics dyn_tz_pred = self.predictor(ts, z0, sys_params) H",
"0, 2) def forward(self, data): o = AttrDict() (z0, sys_params, ts), true_zs =",
"dyn_tz_pred = self.predictor(ts, z0, sys_params) H = lambda t, z: SpringH( z, sys_params[...,",
"train the model if self.debug: o.reports = AttrDict({\"mse\": o.mse, \"mse_V\": o.mse_V, \"mse_dyn\": o.mse_dyn})",
"data): o = AttrDict() (z0, sys_params, ts), true_zs = data pred_zs = self._rollout_model(z0,",
"== \"spring\": # currently a bit inefficient to do the below? with torch.no_grad():",
"debug self.task = task self.model_with_dict = model_with_dict if self.debug: print(\"DynamicsPredictor is in DEBUG",
"import Partial from torchdiffeq import odeint from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV,",
"the first component encodes masses D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim",
"sys_params[..., 1] V_true = SpringV(q, k) mse_V = (V_pred - V_true).pow(2).mean() # dynamics",
"t, z: SpringH( z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1) ) dynamics = HamiltonianDynamics(H, wgrad=False)",
"o.mse = mse o.loss = mse # loss wrt which we train the",
"p = z[:, D // 2 :].reshape(*m.shape, -1) V_pred = self.predictor.compute_V((q, sys_params)) k",
"outputs pred_zs: (bs, T, z_dim)\"\"\" dynamics = Partial(self.predictor, sysP=sys_params) zs = odeint(dynamics, z0,",
"true_zs = data z = z0 m = sys_params[..., 0] # assume the",
"Partial from torchdiffeq import odeint from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH",
"is in DEBUG MODE.\") def _rollout_model(self, z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0: (bs,",
"def _rollout_model(self, z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0: (bs, z_dim), ts: (bs, T),",
"forward pass through our model, including loss computation.\"\"\" def __init__(self, predictor, debug=False, task=\"spring\",",
"import nn from attrdict import AttrDict import torch import torch.nn.functional as F from",
"debug=False, task=\"spring\", model_with_dict=True): super().__init__() self.predictor = predictor self.debug = debug self.task = task",
"lie_conv.dynamicsTrainer import Partial from torchdiffeq import odeint from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics,",
"import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH class DynamicsPredictor(nn.Module): \"\"\"This class implements forward pass",
"# of ODE dims, 2*num_particles*space_dim q = z[:, : D // 2].reshape(*m.shape, -1)",
"model_with_dict if self.debug: print(\"DynamicsPredictor is in DEBUG MODE.\") def _rollout_model(self, z0, ts, sys_params,",
"// 2 :].reshape(*m.shape, -1) V_pred = self.predictor.compute_V((q, sys_params)) k = sys_params[..., 1] V_true",
"# dynamics dyn_tz_pred = self.predictor(ts, z0, sys_params) H = lambda t, z: SpringH(",
"our model, including loss computation.\"\"\" def __init__(self, predictor, debug=False, task=\"spring\", model_with_dict=True): super().__init__() self.predictor",
"task=\"spring\", model_with_dict=True): super().__init__() self.predictor = predictor self.debug = debug self.task = task self.model_with_dict",
"data pred_zs = self._rollout_model(z0, ts, sys_params) mse = (pred_zs - true_zs).pow(2).mean() if self.debug:",
"= z[:, : D // 2].reshape(*m.shape, -1) p = z[:, D // 2",
"the below? with torch.no_grad(): (z0, sys_params, ts), true_zs = data z = z0",
"zs.permute(1, 0, 2) def forward(self, data): o = AttrDict() (z0, sys_params, ts), true_zs",
"= self.predictor.compute_V((q, sys_params)) k = sys_params[..., 1] V_true = SpringV(q, k) mse_V =",
"sys_params) H = lambda t, z: SpringH( z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1) )",
"D // 2 :].reshape(*m.shape, -1) V_pred = self.predictor.compute_V((q, sys_params)) k = sys_params[..., 1]",
"DEBUG MODE.\") def _rollout_model(self, z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0: (bs, z_dim), ts:",
"[z0: (bs, z_dim), ts: (bs, T), sys_params: (bs, n, c)] outputs pred_zs: (bs,",
"sysP=sys_params) zs = odeint(dynamics, z0, ts[0], rtol=tol, method=\"rk4\") return zs.permute(1, 0, 2) def",
"SpringH( z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1) ) dynamics = HamiltonianDynamics(H, wgrad=False) dyn_tz_true =",
"1].squeeze(-1) ) dynamics = HamiltonianDynamics(H, wgrad=False) dyn_tz_true = dynamics(ts, z0) mse_dyn = (dyn_tz_true",
"z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0: (bs, z_dim), ts: (bs, T), sys_params: (bs,",
": D // 2].reshape(*m.shape, -1) p = z[:, D // 2 :].reshape(*m.shape, -1)",
"KeplerH class DynamicsPredictor(nn.Module): \"\"\"This class implements forward pass through our model, including loss",
"do the below? with torch.no_grad(): (z0, sys_params, ts), true_zs = data z =",
"z0, sys_params) H = lambda t, z: SpringH( z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1)",
"of ODE dims, 2*num_particles*space_dim q = z[:, : D // 2].reshape(*m.shape, -1) p",
"// 2].reshape(*m.shape, -1) p = z[:, D // 2 :].reshape(*m.shape, -1) V_pred =",
"T), sys_params: (bs, n, c)] outputs pred_zs: (bs, T, z_dim)\"\"\" dynamics = Partial(self.predictor,",
"T, z_dim)\"\"\" dynamics = Partial(self.predictor, sysP=sys_params) zs = odeint(dynamics, z0, ts[0], rtol=tol, method=\"rk4\")",
"ts[0], rtol=tol, method=\"rk4\") return zs.permute(1, 0, 2) def forward(self, data): o = AttrDict()",
"o.loss = mse # loss wrt which we train the model if self.debug:",
"\"mse_V\": o.mse_V, \"mse_dyn\": o.mse_dyn}) else: o.reports = AttrDict({\"mse\": o.mse}) if not self.model_with_dict: return",
"def forward(self, data): o = AttrDict() (z0, sys_params, ts), true_zs = data pred_zs",
"torch import torch.nn.functional as F from lie_conv.dynamicsTrainer import Partial from torchdiffeq import odeint",
"KeplerV, KeplerH class DynamicsPredictor(nn.Module): \"\"\"This class implements forward pass through our model, including",
"we train the model if self.debug: o.reports = AttrDict({\"mse\": o.mse, \"mse_V\": o.mse_V, \"mse_dyn\":",
"method=\"rk4\") return zs.permute(1, 0, 2) def forward(self, data): o = AttrDict() (z0, sys_params,",
"below? with torch.no_grad(): (z0, sys_params, ts), true_zs = data z = z0 m",
"forward(self, data): o = AttrDict() (z0, sys_params, ts), true_zs = data pred_zs =",
"F from lie_conv.dynamicsTrainer import Partial from torchdiffeq import odeint from lie_conv.hamiltonian import SpringV,",
"MODE.\") def _rollout_model(self, z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0: (bs, z_dim), ts: (bs,",
"dynamics dyn_tz_pred = self.predictor(ts, z0, sys_params) H = lambda t, z: SpringH( z,",
"import odeint from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH class DynamicsPredictor(nn.Module): \"\"\"This",
"c)] outputs pred_zs: (bs, T, z_dim)\"\"\" dynamics = Partial(self.predictor, sysP=sys_params) zs = odeint(dynamics,",
"if self.debug: if self.task == \"spring\": # currently a bit inefficient to do",
"o.prediction = pred_zs o.mse = mse o.loss = mse # loss wrt which",
"pred_zs = self._rollout_model(z0, ts, sys_params) mse = (pred_zs - true_zs).pow(2).mean() if self.debug: if",
"class DynamicsPredictor(nn.Module): \"\"\"This class implements forward pass through our model, including loss computation.\"\"\"",
"super().__init__() self.predictor = predictor self.debug = debug self.task = task self.model_with_dict = model_with_dict",
"ts, sys_params) mse = (pred_zs - true_zs).pow(2).mean() if self.debug: if self.task == \"spring\":",
"dynamics = Partial(self.predictor, sysP=sys_params) zs = odeint(dynamics, z0, ts[0], rtol=tol, method=\"rk4\") return zs.permute(1,",
"AttrDict() (z0, sys_params, ts), true_zs = data pred_zs = self._rollout_model(z0, ts, sys_params) mse",
"= pred_zs o.mse = mse o.loss = mse # loss wrt which we",
"o.mse, \"mse_V\": o.mse_V, \"mse_dyn\": o.mse_dyn}) else: o.reports = AttrDict({\"mse\": o.mse}) if not self.model_with_dict:",
"-1) V_pred = self.predictor.compute_V((q, sys_params)) k = sys_params[..., 1] V_true = SpringV(q, k)",
"self.debug = debug self.task = task self.model_with_dict = model_with_dict if self.debug: print(\"DynamicsPredictor is",
"\"mse_dyn\": o.mse_dyn}) else: o.reports = AttrDict({\"mse\": o.mse}) if not self.model_with_dict: return pred_zs return",
"= mse o.loss = mse # loss wrt which we train the model",
"nn from attrdict import AttrDict import torch import torch.nn.functional as F from lie_conv.dynamicsTrainer",
"(bs, T), sys_params: (bs, n, c)] outputs pred_zs: (bs, T, z_dim)\"\"\" dynamics =",
"(dyn_tz_true - dyn_tz_pred).pow(2).mean() o.mse_dyn = mse_dyn o.mse_V = mse_V o.prediction = pred_zs o.mse",
"rtol=tol, method=\"rk4\") return zs.permute(1, 0, 2) def forward(self, data): o = AttrDict() (z0,",
"__init__(self, predictor, debug=False, task=\"spring\", model_with_dict=True): super().__init__() self.predictor = predictor self.debug = debug self.task",
"sys_params: (bs, n, c)] outputs pred_zs: (bs, T, z_dim)\"\"\" dynamics = Partial(self.predictor, sysP=sys_params)",
"= sys_params[..., 1] V_true = SpringV(q, k) mse_V = (V_pred - V_true).pow(2).mean() #",
"= (V_pred - V_true).pow(2).mean() # dynamics dyn_tz_pred = self.predictor(ts, z0, sys_params) H =",
"torch.no_grad(): (z0, sys_params, ts), true_zs = data z = z0 m = sys_params[...,",
"dims, 2*num_particles*space_dim q = z[:, : D // 2].reshape(*m.shape, -1) p = z[:,",
"= lambda t, z: SpringH( z, sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1) ) dynamics =",
"mse o.loss = mse # loss wrt which we train the model if",
"o.mse_V = mse_V o.prediction = pred_zs o.mse = mse o.loss = mse #",
"print(\"DynamicsPredictor is in DEBUG MODE.\") def _rollout_model(self, z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0:",
"pass through our model, including loss computation.\"\"\" def __init__(self, predictor, debug=False, task=\"spring\", model_with_dict=True):",
"= sys_params[..., 0] # assume the first component encodes masses D = z.shape[-1]",
"mse_dyn = (dyn_tz_true - dyn_tz_pred).pow(2).mean() o.mse_dyn = mse_dyn o.mse_V = mse_V o.prediction =",
"if self.debug: print(\"DynamicsPredictor is in DEBUG MODE.\") def _rollout_model(self, z0, ts, sys_params, tol=1e-4):",
"to do the below? with torch.no_grad(): (z0, sys_params, ts), true_zs = data z",
"V_true = SpringV(q, k) mse_V = (V_pred - V_true).pow(2).mean() # dynamics dyn_tz_pred =",
"self._rollout_model(z0, ts, sys_params) mse = (pred_zs - true_zs).pow(2).mean() if self.debug: if self.task ==",
"\"\"\"This class implements forward pass through our model, including loss computation.\"\"\" def __init__(self,",
"sys_params[..., 0].squeeze(-1), sys_params[..., 1].squeeze(-1) ) dynamics = HamiltonianDynamics(H, wgrad=False) dyn_tz_true = dynamics(ts, z0)",
"z0) mse_dyn = (dyn_tz_true - dyn_tz_pred).pow(2).mean() o.mse_dyn = mse_dyn o.mse_V = mse_V o.prediction",
"if self.debug: o.reports = AttrDict({\"mse\": o.mse, \"mse_V\": o.mse_V, \"mse_dyn\": o.mse_dyn}) else: o.reports =",
"import AttrDict import torch import torch.nn.functional as F from lie_conv.dynamicsTrainer import Partial from",
"o = AttrDict() (z0, sys_params, ts), true_zs = data pred_zs = self._rollout_model(z0, ts,",
"wrt which we train the model if self.debug: o.reports = AttrDict({\"mse\": o.mse, \"mse_V\":",
"= dynamics(ts, z0) mse_dyn = (dyn_tz_true - dyn_tz_pred).pow(2).mean() o.mse_dyn = mse_dyn o.mse_V =",
"self.debug: o.reports = AttrDict({\"mse\": o.mse, \"mse_V\": o.mse_V, \"mse_dyn\": o.mse_dyn}) else: o.reports = AttrDict({\"mse\":",
"(bs, z_dim), ts: (bs, T), sys_params: (bs, n, c)] outputs pred_zs: (bs, T,",
"torchdiffeq import odeint from lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH class DynamicsPredictor(nn.Module):",
"(pred_zs - true_zs).pow(2).mean() if self.debug: if self.task == \"spring\": # currently a bit",
"which we train the model if self.debug: o.reports = AttrDict({\"mse\": o.mse, \"mse_V\": o.mse_V,",
"(z0, sys_params, ts), true_zs = data pred_zs = self._rollout_model(z0, ts, sys_params) mse =",
"a bit inefficient to do the below? with torch.no_grad(): (z0, sys_params, ts), true_zs",
"z = z0 m = sys_params[..., 0] # assume the first component encodes",
"DynamicsPredictor(nn.Module): \"\"\"This class implements forward pass through our model, including loss computation.\"\"\" def",
"as F from lie_conv.dynamicsTrainer import Partial from torchdiffeq import odeint from lie_conv.hamiltonian import",
"= task self.model_with_dict = model_with_dict if self.debug: print(\"DynamicsPredictor is in DEBUG MODE.\") def",
"inefficient to do the below? with torch.no_grad(): (z0, sys_params, ts), true_zs = data",
"_rollout_model(self, z0, ts, sys_params, tol=1e-4): \"\"\"inputs [z0: (bs, z_dim), ts: (bs, T), sys_params:",
"z_dim), ts: (bs, T), sys_params: (bs, n, c)] outputs pred_zs: (bs, T, z_dim)\"\"\"",
"0].squeeze(-1), sys_params[..., 1].squeeze(-1) ) dynamics = HamiltonianDynamics(H, wgrad=False) dyn_tz_true = dynamics(ts, z0) mse_dyn",
"= Partial(self.predictor, sysP=sys_params) zs = odeint(dynamics, z0, ts[0], rtol=tol, method=\"rk4\") return zs.permute(1, 0,",
"sys_params[..., 0] # assume the first component encodes masses D = z.shape[-1] #",
"= SpringV(q, k) mse_V = (V_pred - V_true).pow(2).mean() # dynamics dyn_tz_pred = self.predictor(ts,",
"= AttrDict({\"mse\": o.mse, \"mse_V\": o.mse_V, \"mse_dyn\": o.mse_dyn}) else: o.reports = AttrDict({\"mse\": o.mse}) if",
"# assume the first component encodes masses D = z.shape[-1] # of ODE",
"z.shape[-1] # of ODE dims, 2*num_particles*space_dim q = z[:, : D // 2].reshape(*m.shape,",
"through our model, including loss computation.\"\"\" def __init__(self, predictor, debug=False, task=\"spring\", model_with_dict=True): super().__init__()",
":].reshape(*m.shape, -1) V_pred = self.predictor.compute_V((q, sys_params)) k = sys_params[..., 1] V_true = SpringV(q,",
"self.predictor.compute_V((q, sys_params)) k = sys_params[..., 1] V_true = SpringV(q, k) mse_V = (V_pred",
"implements forward pass through our model, including loss computation.\"\"\" def __init__(self, predictor, debug=False,",
"= self.predictor(ts, z0, sys_params) H = lambda t, z: SpringH( z, sys_params[..., 0].squeeze(-1),",
"with torch.no_grad(): (z0, sys_params, ts), true_zs = data z = z0 m =",
"sys_params[..., 1].squeeze(-1) ) dynamics = HamiltonianDynamics(H, wgrad=False) dyn_tz_true = dynamics(ts, z0) mse_dyn =",
"= mse_V o.prediction = pred_zs o.mse = mse o.loss = mse # loss",
"-1) p = z[:, D // 2 :].reshape(*m.shape, -1) V_pred = self.predictor.compute_V((q, sys_params))",
"2 :].reshape(*m.shape, -1) V_pred = self.predictor.compute_V((q, sys_params)) k = sys_params[..., 1] V_true =",
"= predictor self.debug = debug self.task = task self.model_with_dict = model_with_dict if self.debug:",
"tol=1e-4): \"\"\"inputs [z0: (bs, z_dim), ts: (bs, T), sys_params: (bs, n, c)] outputs",
"lie_conv.hamiltonian import SpringV, SpringH, HamiltonianDynamics, KeplerV, KeplerH class DynamicsPredictor(nn.Module): \"\"\"This class implements forward",
"attrdict import AttrDict import torch import torch.nn.functional as F from lie_conv.dynamicsTrainer import Partial",
"mse # loss wrt which we train the model if self.debug: o.reports =",
"first component encodes masses D = z.shape[-1] # of ODE dims, 2*num_particles*space_dim q",
"q = z[:, : D // 2].reshape(*m.shape, -1) p = z[:, D //",
"predictor self.debug = debug self.task = task self.model_with_dict = model_with_dict if self.debug: print(\"DynamicsPredictor",
"true_zs = data pred_zs = self._rollout_model(z0, ts, sys_params) mse = (pred_zs - true_zs).pow(2).mean()",
"z_dim)\"\"\" dynamics = Partial(self.predictor, sysP=sys_params) zs = odeint(dynamics, z0, ts[0], rtol=tol, method=\"rk4\") return",
"z[:, : D // 2].reshape(*m.shape, -1) p = z[:, D // 2 :].reshape(*m.shape,",
"z0 m = sys_params[..., 0] # assume the first component encodes masses D",
"if self.task == \"spring\": # currently a bit inefficient to do the below?",
"class implements forward pass through our model, including loss computation.\"\"\" def __init__(self, predictor,"
] |
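Added for illustration, not part of the original module: a minimal smoke test. `ToyPredictor` is a hypothetical stand-in, and the sketch assumes (as the rollout code suggests) that `Partial` forwards `sysP` as a keyword, so the predictor only needs to be callable as `predictor(t, z, sysP=...)` and return dz/dt with z's shape; `debug=False` keeps the spring-specific checks out of the path.

import torch
from torch import nn

class ToyPredictor(nn.Module):
    # Stand-in dynamics model: maps (t, z, sysP) -> dz/dt with the shape of z.
    def forward(self, t, z, sysP=None):
        return torch.zeros_like(z)  # trivial "frozen" dynamics

bs, T, z_dim, n, c = 4, 10, 12, 6, 2            # z_dim = 2 * n_particles * space_dim
z0 = torch.randn(bs, z_dim)
ts = torch.linspace(0.0, 1.0, T).repeat(bs, 1)  # (bs, T); _rollout_model integrates on ts[0]
sys_params = torch.rand(bs, n, c)               # e.g. per-particle masses and spring constants
true_zs = torch.randn(bs, T, z_dim)

model = DynamicsPredictor(ToyPredictor(), debug=False, task="spring")
out = model(((z0, sys_params, ts), true_zs))
print(out.prediction.shape, out.loss)           # torch.Size([4, 10, 12]) and a scalar MSE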
[
"c, i = 0, 0 while c < t: c += 3 <<",
"c - t + 1 def strangeCounter2(t): rem = 3 while t >",
"1 def strangeCounter2(t): rem = 3 while t > rem: t = t-rem",
"t: c += 3 << i i += 1 return c - t",
"<reponame>tjeubaoit/algorithm<filename>hackerrank/Algorithms/implementation/strange-code.py # Complete the strangeCounter function below. def strangeCounter(t): c, i = 0,",
"+ 1 def strangeCounter2(t): rem = 3 while t > rem: t =",
"= 3 while t > rem: t = t-rem rem *= 2 return",
"strangeCounter function below. def strangeCounter(t): c, i = 0, 0 while c <",
"+= 1 return c - t + 1 def strangeCounter2(t): rem = 3",
"3 << i i += 1 return c - t + 1 def",
"rem = 3 while t > rem: t = t-rem rem *= 2",
"1 return c - t + 1 def strangeCounter2(t): rem = 3 while",
"+= 3 << i i += 1 return c - t + 1",
"i i += 1 return c - t + 1 def strangeCounter2(t): rem",
"function below. def strangeCounter(t): c, i = 0, 0 while c < t:",
"Complete the strangeCounter function below. def strangeCounter(t): c, i = 0, 0 while",
"i += 1 return c - t + 1 def strangeCounter2(t): rem =",
"strangeCounter(t): c, i = 0, 0 while c < t: c += 3",
"while c < t: c += 3 << i i += 1 return",
"strangeCounter2(t): rem = 3 while t > rem: t = t-rem rem *=",
"return c - t + 1 def strangeCounter2(t): rem = 3 while t",
"<< i i += 1 return c - t + 1 def strangeCounter2(t):",
"0 while c < t: c += 3 << i i += 1",
"c < t: c += 3 << i i += 1 return c",
"i = 0, 0 while c < t: c += 3 << i",
"c += 3 << i i += 1 return c - t +",
"0, 0 while c < t: c += 3 << i i +=",
"the strangeCounter function below. def strangeCounter(t): c, i = 0, 0 while c",
"# Complete the strangeCounter function below. def strangeCounter(t): c, i = 0, 0",
"- t + 1 def strangeCounter2(t): rem = 3 while t > rem:",
"= 0, 0 while c < t: c += 3 << i i",
"< t: c += 3 << i i += 1 return c -",
"3 while t > rem: t = t-rem rem *= 2 return rem-t+1",
"below. def strangeCounter(t): c, i = 0, 0 while c < t: c",
"def strangeCounter2(t): rem = 3 while t > rem: t = t-rem rem",
"t + 1 def strangeCounter2(t): rem = 3 while t > rem: t",
"def strangeCounter(t): c, i = 0, 0 while c < t: c +="
] |
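A quick equivalence check, added for illustration: both functions exploit that each cycle of the counter is twice as long as the last (3, 6, 12, ...), one by accumulating cycle lengths with a shift, the other by subtracting them. t = 4 is the sample case from the HackerRank problem, where the counter counts 3, 2, 1 and then restarts at 6.

if __name__ == '__main__':
    assert all(strangeCounter(t) == strangeCounter2(t) for t in range(1, 10000))
    print(strangeCounter(4))  # 6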
[] |
[
"acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony, limit=100) print(tour.cost) print(tour.nodes)",
"= acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony, limit=100) print(tour.cost)",
"q=1) colony = acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony, limit=100) print(tour.cost) print(tour.nodes) \"\"\"",
"colony = acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony, limit=100) print(tour.cost) print(tour.nodes) \"\"\" if",
"import acopy import networkx as nx def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G,",
"acopy import networkx as nx def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT'))",
"G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1) colony =",
"main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1) colony",
"print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3) tour",
"= acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony, limit=100) print(tour.cost) print(tour.nodes) \"\"\" if __name__",
"'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3) tour = solver.solve(G,",
"print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3) tour =",
"acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony, limit=100) print(tour.cost) print(tour.nodes) \"\"\" if __name__ ==",
"tour = solver.solve(G, colony, limit=100) print(tour.cost) print(tour.nodes) \"\"\" if __name__ == '__main__': main()",
"solver = acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony, limit=100)",
"def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1)",
"beta=3) tour = solver.solve(G, colony, limit=100) print(tour.cost) print(tour.nodes) \"\"\" if __name__ == '__main__':",
"nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3)",
"\"\"\" solver = acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1, beta=3) tour = solver.solve(G, colony,",
"import networkx as nx def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\"",
"= nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03, q=1) colony = acopy.Colony(alpha=1,",
"<reponame>andremtsilva/dissertacao import acopy import networkx as nx def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes())",
"as nx def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver =",
"networkx as nx def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver",
"nx def main(): G = nx.read_graphml('graph_binomial_tree_5.graphml') print(G.nodes()) print(nx.get_node_attributes(G, 'IPT')) \"\"\" solver = acopy.Solver(rho=.03,"
] |
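Added sketch, not in the original: the commented-out ant-colony block run end to end on a synthetic graph. It reuses the exact Solver/Colony/solve calls from the file above; the complete graph and its random 'weight' attributes are assumptions here, since acopy appears to score edges by their 'weight' attribute (which the GraphML file presumably supplies in the real run).

import random

def demo():
    # Small complete graph with explicit integer edge weights.
    G = nx.complete_graph(8)
    for u, v in G.edges:
        G.edges[u, v]['weight'] = random.randint(1, 10)
    solver = acopy.Solver(rho=.03, q=1)
    colony = acopy.Colony(alpha=1, beta=3)
    tour = solver.solve(G, colony, limit=100)
    print(tour.cost)
    print(tour.nodes)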
[
"means anyone can accept # player in self.__offerees checks if the player is",
"he/she offered\") player.requireResources(self._cost) # both have what they wish to trade player.takeResources(self._cost) self.__offeror.takeResources(self._goodsOffered)",
"anyone can accept # player in self.__offerees checks if the player is in",
"Returns whether or not this player can accept the given trade deal. '''",
"both have what they wish to trade player.takeResources(self._cost) self.__offeror.takeResources(self._goodsOffered) player.giveResource(self._goodsOffered) self.__offeror.giveResources(self._cost) self.__closedDeal =",
"self.__invalid = False # whether the deal has been invalidated super().__init__(cost, goodsOffered) def",
"what he offered self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\")",
"have what he offers ''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player = offeror",
"def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int] = cost",
"can accept the offer. InterPlayerTrade are for only a single trade and so",
"throws ActionError self.__offeror: Player = offeror self.__offerees: List[Player] = offerees self.__closedDeal = False",
"offer. InterPlayerTrade are for only a single trade and so become invalid after",
"''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player = offeror self.__offerees: List[Player] = offerees",
"not allowed to take this trade offer\") if self.__invalid: raise ActionError(f\"This trade offer",
"Player) -> bool: ''' Returns whether or not this player can accept the",
"players def purchase(self, player: Player): # errors here would go to player if",
"import ActionError from .player import Player class Trade: def __init__(self, cost: Dict[Resource, int],",
"to take this trade offer\") if self.__invalid: raise ActionError(f\"This trade offer is invalid",
"= False # whether the deal has been closed self.__invalid = False #",
"Dict, List from ..extraCode.location import Resource from ..extraCode.util import ActionError from .player import",
"goodsOffered def purchase(self, player: Player): ''' throws `ActionError`. player is the one accepting",
"offer is invalid due to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer has already",
"cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int] = cost self._goodsOffered: Dict[Resource,",
"Player = offeror self.__offerees: List[Player] = offerees self.__closedDeal = False # whether the",
"of the offer. If Offerees is empty, then anyone can accept the offer.",
"def purchase(self, player: Player): # errors here would go to player if not",
"class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player, offerees:",
"Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int] = cost self._goodsOffered: Dict[Resource, int]",
"Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player] = []): ''' Offeror",
"# player in self.__offerees checks if the player is in the list of",
"# both have what they wish to trade player.takeResources(self._cost) self.__offeror.takeResources(self._goodsOffered) player.giveResource(self._goodsOffered) self.__offeror.giveResources(self._cost) self.__closedDeal",
"__isValidOfferee(self, player: Player) -> bool: ''' Returns whether or not this player can",
"one accepting the trade ''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade):",
"trade deal. ''' return len(self.__offerees) == 0 or player in self.__offerees # len(self.__offerees)",
"self._cost: Dict[Resource, int] = cost self._goodsOffered: Dict[Resource, int] = goodsOffered def purchase(self, player:",
"have what he/she offered\") player.requireResources(self._cost) # both have what they wish to trade",
"-> bool: ''' Returns whether or not this player can accept the given",
"Player): # errors here would go to player if not self.__isValidOfferee(player): raise ActionError(f\"Player",
"= offeror self.__offerees: List[Player] = offerees self.__closedDeal = False # whether the deal",
"the offer. If Offerees is empty, then anyone can accept the offer. InterPlayerTrade",
"ActionError(f\"This trade offer is invalid due to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer",
"offeror self.__offerees: List[Player] = offerees self.__closedDeal = False # whether the deal has",
"take this trade offer\") if self.__invalid: raise ActionError(f\"This trade offer is invalid due",
"int], offeror: Player, offerees: List[Player] = []): ''' Offeror is the one proposing",
"can accept # player in self.__offerees checks if the player is in the",
"raise ActionError(f\"This trade offer is invalid due to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This",
"offerees self.__closedDeal = False # whether the deal has been closed self.__invalid =",
"deal. ''' return len(self.__offerees) == 0 or player in self.__offerees # len(self.__offerees) ==",
"InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player]",
"accept # player in self.__offerees checks if the player is in the list",
"ActionError(f\"This offer has already been accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror no longer",
"True raise ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\") player.requireResources(self._cost) # both have what",
"offer. If Offerees is empty, then anyone can accept the offer. InterPlayerTrade are",
"Offeror does not have what he offers ''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror:",
"def __isValidOfferee(self, player: Player) -> bool: ''' Returns whether or not this player",
"from typing import Dict, List from ..extraCode.location import Resource from ..extraCode.util import ActionError",
"accepted players def purchase(self, player: Player): # errors here would go to player",
"or player in self.__offerees # len(self.__offerees) == 0 means anyone can accept #",
"Trade: def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int] =",
"he offered self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\") player.requireResources(self._cost)",
"Dict[Resource, int]): self._cost: Dict[Resource, int] = cost self._goodsOffered: Dict[Resource, int] = goodsOffered def",
"self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed to take this trade offer\") if self.__invalid:",
"len(self.__offerees) == 0 or player in self.__offerees # len(self.__offerees) == 0 means anyone",
"[]): ''' Offeror is the one proposing the offer, Offerees is the recipients",
"the trade ''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self,",
"Player class Trade: def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource,",
"ActionError from .player import Player class Trade: def __init__(self, cost: Dict[Resource, int], goodsOffered:",
"goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player] = []): ''' Offeror is the",
"int]): self._cost: Dict[Resource, int] = cost self._goodsOffered: Dict[Resource, int] = goodsOffered def purchase(self,",
"only a single trade and so become invalid after a successful trade. Throws",
"# offeror no longer has what he offered self.__invalid = True raise ActionError(f\"{self.__offeror.name}",
"InterPlayerTrade are for only a single trade and so become invalid after a",
"due to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer has already been accepted\") if",
"player is the one accepting the trade ''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost)",
"the offer, Offerees is the recipients of the offer. If Offerees is empty,",
"__init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player] = []):",
"from ..extraCode.util import ActionError from .player import Player class Trade: def __init__(self, cost:",
"..extraCode.util import ActionError from .player import Player class Trade: def __init__(self, cost: Dict[Resource,",
"ActionError if the Offeror does not have what he offers ''' offeror.requireResources(goodsOffered) #",
"''' Offeror is the one proposing the offer, Offerees is the recipients of",
"this trade offer\") if self.__invalid: raise ActionError(f\"This trade offer is invalid due to",
"self.__offerees: List[Player] = offerees self.__closedDeal = False # whether the deal has been",
"so become invalid after a successful trade. Throws ActionError if the Offeror does",
"a successful trade. Throws ActionError if the Offeror does not have what he",
"class Trade: def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int]",
"has been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player) -> bool: ''' Returns",
"player is in the list of accepted players def purchase(self, player: Player): #",
"would go to player if not self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed to",
"goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int] = cost self._goodsOffered: Dict[Resource, int] = goodsOffered",
"the offer. InterPlayerTrade are for only a single trade and so become invalid",
"= offerees self.__closedDeal = False # whether the deal has been closed self.__invalid",
"0 means anyone can accept # player in self.__offerees checks if the player",
"import Player class Trade: def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost:",
"longer has what he offered self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't have what",
"Throws ActionError if the Offeror does not have what he offers ''' offeror.requireResources(goodsOffered)",
"player: Player): # errors here would go to player if not self.__isValidOfferee(player): raise",
"cost self._goodsOffered: Dict[Resource, int] = goodsOffered def purchase(self, player: Player): ''' throws `ActionError`.",
"int] = goodsOffered def purchase(self, player: Player): ''' throws `ActionError`. player is the",
"is the one accepting the trade ''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered)",
"= []): ''' Offeror is the one proposing the offer, Offerees is the",
"player.requireResources(self._cost) # both have what they wish to trade player.takeResources(self._cost) self.__offeror.takeResources(self._goodsOffered) player.giveResource(self._goodsOffered) self.__offeror.giveResources(self._cost)",
"offered self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\") player.requireResources(self._cost) #",
"what he/she offered\") player.requireResources(self._cost) # both have what they wish to trade player.takeResources(self._cost)",
"then anyone can accept the offer. InterPlayerTrade are for only a single trade",
"Dict[Resource, int] = goodsOffered def purchase(self, player: Player): ''' throws `ActionError`. player is",
"False # whether the deal has been closed self.__invalid = False # whether",
"deal has been closed self.__invalid = False # whether the deal has been",
"# whether the deal has been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player)",
"whether or not this player can accept the given trade deal. ''' return",
"is invalid due to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer has already been",
"self.__closedDeal: raise ActionError(f\"This offer has already been accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror",
"self.__offeror.hasResources(self._goodsOffered): # offeror no longer has what he offered self.__invalid = True raise",
"cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player] = []): '''",
"accept the offer. InterPlayerTrade are for only a single trade and so become",
"single trade and so become invalid after a successful trade. Throws ActionError if",
"throws `ActionError`. player is the one accepting the trade ''' player.requireResources(self._cost) # raises",
"empty, then anyone can accept the offer. InterPlayerTrade are for only a single",
"# len(self.__offerees) == 0 means anyone can accept # player in self.__offerees checks",
"self.__offerees checks if the player is in the list of accepted players def",
"been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player) -> bool: ''' Returns whether",
"raise ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\") player.requireResources(self._cost) # both have what they",
"== 0 or player in self.__offerees # len(self.__offerees) == 0 means anyone can",
"what he offers ''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player = offeror self.__offerees:",
"if self.__closedDeal: raise ActionError(f\"This offer has already been accepted\") if not self.__offeror.hasResources(self._goodsOffered): #",
"List from ..extraCode.location import Resource from ..extraCode.util import ActionError from .player import Player",
"trade. Throws ActionError if the Offeror does not have what he offers '''",
"== 0 means anyone can accept # player in self.__offerees checks if the",
"whether the deal has been closed self.__invalid = False # whether the deal",
"and so become invalid after a successful trade. Throws ActionError if the Offeror",
"self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\") player.requireResources(self._cost) # both",
"Resource from ..extraCode.util import ActionError from .player import Player class Trade: def __init__(self,",
"can accept the given trade deal. ''' return len(self.__offerees) == 0 or player",
"= cost self._goodsOffered: Dict[Resource, int] = goodsOffered def purchase(self, player: Player): ''' throws",
"player: Player): ''' throws `ActionError`. player is the one accepting the trade '''",
"is not allowed to take this trade offer\") if self.__invalid: raise ActionError(f\"This trade",
"int] = cost self._goodsOffered: Dict[Resource, int] = goodsOffered def purchase(self, player: Player): '''",
"__init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int] = cost self._goodsOffered:",
"offer\") if self.__invalid: raise ActionError(f\"This trade offer is invalid due to {self.__offeror.name}\") if",
"List[Player] = offerees self.__closedDeal = False # whether the deal has been closed",
"''' throws `ActionError`. player is the one accepting the trade ''' player.requireResources(self._cost) #",
"goodsOffered) def __isValidOfferee(self, player: Player) -> bool: ''' Returns whether or not this",
"or not this player can accept the given trade deal. ''' return len(self.__offerees)",
"the list of accepted players def purchase(self, player: Player): # errors here would",
"typing import Dict, List from ..extraCode.location import Resource from ..extraCode.util import ActionError from",
"Dict[Resource, int] = cost self._goodsOffered: Dict[Resource, int] = goodsOffered def purchase(self, player: Player):",
"offers ''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player = offeror self.__offerees: List[Player] =",
"he offers ''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player = offeror self.__offerees: List[Player]",
"if not self.__offeror.hasResources(self._goodsOffered): # offeror no longer has what he offered self.__invalid =",
"player in self.__offerees checks if the player is in the list of accepted",
"{self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer has already been accepted\") if not self.__offeror.hasResources(self._goodsOffered):",
"player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player,",
"after a successful trade. Throws ActionError if the Offeror does not have what",
"already been accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror no longer has what he",
"Offerees is the recipients of the offer. If Offerees is empty, then anyone",
"checks if the player is in the list of accepted players def purchase(self,",
"offer, Offerees is the recipients of the offer. If Offerees is empty, then",
"Player): ''' throws `ActionError`. player is the one accepting the trade ''' player.requireResources(self._cost)",
"the recipients of the offer. If Offerees is empty, then anyone can accept",
"invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player) -> bool: ''' Returns whether or",
"# errors here would go to player if not self.__isValidOfferee(player): raise ActionError(f\"Player is",
"trade offer is invalid due to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer has",
"raise ActionError(f\"This offer has already been accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror no",
"accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror no longer has what he offered self.__invalid",
"raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource,",
"the one proposing the offer, Offerees is the recipients of the offer. If",
"is the one proposing the offer, Offerees is the recipients of the offer.",
"player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror:",
"# raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int], goodsOffered:",
"player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int],",
"ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int],",
"no longer has what he offered self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't have",
"have what they wish to trade player.takeResources(self._cost) self.__offeror.takeResources(self._goodsOffered) player.giveResource(self._goodsOffered) self.__offeror.giveResources(self._cost) self.__closedDeal = True",
"len(self.__offerees) == 0 means anyone can accept # player in self.__offerees checks if",
"player in self.__offerees # len(self.__offerees) == 0 means anyone can accept # player",
"are for only a single trade and so become invalid after a successful",
"errors here would go to player if not self.__isValidOfferee(player): raise ActionError(f\"Player is not",
"self._goodsOffered: Dict[Resource, int] = goodsOffered def purchase(self, player: Player): ''' throws `ActionError`. player",
"given trade deal. ''' return len(self.__offerees) == 0 or player in self.__offerees #",
"become invalid after a successful trade. Throws ActionError if the Offeror does not",
"''' Returns whether or not this player can accept the given trade deal.",
"If Offerees is empty, then anyone can accept the offer. InterPlayerTrade are for",
"purchase(self, player: Player): ''' throws `ActionError`. player is the one accepting the trade",
"if not self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed to take this trade offer\")",
"the one accepting the trade ''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class",
"not self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed to take this trade offer\") if",
"<gh_stars>0 from typing import Dict, List from ..extraCode.location import Resource from ..extraCode.util import",
"accepting the trade ''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def",
"if the Offeror does not have what he offers ''' offeror.requireResources(goodsOffered) # throws",
"offer has already been accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror no longer has",
"one proposing the offer, Offerees is the recipients of the offer. If Offerees",
"has already been accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror no longer has what",
"offered\") player.requireResources(self._cost) # both have what they wish to trade player.takeResources(self._cost) self.__offeror.takeResources(self._goodsOffered) player.giveResource(self._goodsOffered)",
"return len(self.__offerees) == 0 or player in self.__offerees # len(self.__offerees) == 0 means",
"= goodsOffered def purchase(self, player: Player): ''' throws `ActionError`. player is the one",
"player if not self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed to take this trade",
"been closed self.__invalid = False # whether the deal has been invalidated super().__init__(cost,",
"Offerees is empty, then anyone can accept the offer. InterPlayerTrade are for only",
"offerees: List[Player] = []): ''' Offeror is the one proposing the offer, Offerees",
"if the player is in the list of accepted players def purchase(self, player:",
"anyone can accept the offer. InterPlayerTrade are for only a single trade and",
"list of accepted players def purchase(self, player: Player): # errors here would go",
"not have what he offers ''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player =",
"ActionError(f\"Player is not allowed to take this trade offer\") if self.__invalid: raise ActionError(f\"This",
"from .player import Player class Trade: def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource,",
"offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player = offeror self.__offerees: List[Player] = offerees self.__closedDeal",
"List[Player] = []): ''' Offeror is the one proposing the offer, Offerees is",
"recipients of the offer. If Offerees is empty, then anyone can accept the",
"trade and so become invalid after a successful trade. Throws ActionError if the",
"does not have what he offers ''' offeror.requireResources(goodsOffered) # throws ActionError self.__offeror: Player",
"been accepted\") if not self.__offeror.hasResources(self._goodsOffered): # offeror no longer has what he offered",
"ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\") player.requireResources(self._cost) # both have what they wish",
"import Resource from ..extraCode.util import ActionError from .player import Player class Trade: def",
"is in the list of accepted players def purchase(self, player: Player): # errors",
"the player is in the list of accepted players def purchase(self, player: Player):",
"purchase(self, player: Player): # errors here would go to player if not self.__isValidOfferee(player):",
"doesn't have what he/she offered\") player.requireResources(self._cost) # both have what they wish to",
"Player, offerees: List[Player] = []): ''' Offeror is the one proposing the offer,",
"''' return len(self.__offerees) == 0 or player in self.__offerees # len(self.__offerees) == 0",
"super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player) -> bool: ''' Returns whether or not",
"the deal has been closed self.__invalid = False # whether the deal has",
"..extraCode.location import Resource from ..extraCode.util import ActionError from .player import Player class Trade:",
"self.__closedDeal = False # whether the deal has been closed self.__invalid = False",
"self.__invalid: raise ActionError(f\"This trade offer is invalid due to {self.__offeror.name}\") if self.__closedDeal: raise",
"from ..extraCode.location import Resource from ..extraCode.util import ActionError from .player import Player class",
"raise ActionError(f\"Player is not allowed to take this trade offer\") if self.__invalid: raise",
"proposing the offer, Offerees is the recipients of the offer. If Offerees is",
"invalid after a successful trade. Throws ActionError if the Offeror does not have",
"allowed to take this trade offer\") if self.__invalid: raise ActionError(f\"This trade offer is",
"is empty, then anyone can accept the offer. InterPlayerTrade are for only a",
"successful trade. Throws ActionError if the Offeror does not have what he offers",
"if self.__invalid: raise ActionError(f\"This trade offer is invalid due to {self.__offeror.name}\") if self.__closedDeal:",
"bool: ''' Returns whether or not this player can accept the given trade",
"to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer has already been accepted\") if not",
"int], goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player] = []): ''' Offeror is",
"go to player if not self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed to take",
"has been closed self.__invalid = False # whether the deal has been invalidated",
"in self.__offerees # len(self.__offerees) == 0 means anyone can accept # player in",
"here would go to player if not self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed",
"to player if not self.__isValidOfferee(player): raise ActionError(f\"Player is not allowed to take this",
"trade offer\") if self.__invalid: raise ActionError(f\"This trade offer is invalid due to {self.__offeror.name}\")",
"is the recipients of the offer. If Offerees is empty, then anyone can",
"in self.__offerees checks if the player is in the list of accepted players",
"of accepted players def purchase(self, player: Player): # errors here would go to",
"the Offeror does not have what he offers ''' offeror.requireResources(goodsOffered) # throws ActionError",
"deal has been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player) -> bool: '''",
"not self.__offeror.hasResources(self._goodsOffered): # offeror no longer has what he offered self.__invalid = True",
"self.__offerees # len(self.__offerees) == 0 means anyone can accept # player in self.__offerees",
"the deal has been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player) -> bool:",
"closed self.__invalid = False # whether the deal has been invalidated super().__init__(cost, goodsOffered)",
"False # whether the deal has been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player:",
"0 or player in self.__offerees # len(self.__offerees) == 0 means anyone can accept",
"player can accept the given trade deal. ''' return len(self.__offerees) == 0 or",
"offeror: Player, offerees: List[Player] = []): ''' Offeror is the one proposing the",
"Dict[Resource, int], offeror: Player, offerees: List[Player] = []): ''' Offeror is the one",
"self.__offeror: Player = offeror self.__offerees: List[Player] = offerees self.__closedDeal = False # whether",
"has what he offered self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't have what he/she",
".player import Player class Trade: def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int]):",
"int], goodsOffered: Dict[Resource, int]): self._cost: Dict[Resource, int] = cost self._goodsOffered: Dict[Resource, int] =",
"whether the deal has been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self, player: Player) ->",
"= True raise ActionError(f\"{self.__offeror.name} doesn't have what he/she offered\") player.requireResources(self._cost) # both have",
"not this player can accept the given trade deal. ''' return len(self.__offerees) ==",
"# throws ActionError self.__offeror: Player = offeror self.__offerees: List[Player] = offerees self.__closedDeal =",
"accept the given trade deal. ''' return len(self.__offerees) == 0 or player in",
"def __init__(self, cost: Dict[Resource, int], goodsOffered: Dict[Resource, int], offeror: Player, offerees: List[Player] =",
"in the list of accepted players def purchase(self, player: Player): # errors here",
"Offeror is the one proposing the offer, Offerees is the recipients of the",
"`ActionError`. player is the one accepting the trade ''' player.requireResources(self._cost) # raises ActionError",
"# whether the deal has been closed self.__invalid = False # whether the",
"a single trade and so become invalid after a successful trade. Throws ActionError",
"for only a single trade and so become invalid after a successful trade.",
"player: Player) -> bool: ''' Returns whether or not this player can accept",
"invalid due to {self.__offeror.name}\") if self.__closedDeal: raise ActionError(f\"This offer has already been accepted\")",
"offeror no longer has what he offered self.__invalid = True raise ActionError(f\"{self.__offeror.name} doesn't",
"= False # whether the deal has been invalidated super().__init__(cost, goodsOffered) def __isValidOfferee(self,",
"the given trade deal. ''' return len(self.__offerees) == 0 or player in self.__offerees",
"import Dict, List from ..extraCode.location import Resource from ..extraCode.util import ActionError from .player",
"''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost: Dict[Resource,",
"ActionError self.__offeror: Player = offeror self.__offerees: List[Player] = offerees self.__closedDeal = False #",
"this player can accept the given trade deal. ''' return len(self.__offerees) == 0",
"def purchase(self, player: Player): ''' throws `ActionError`. player is the one accepting the",
"trade ''' player.requireResources(self._cost) # raises ActionError player.takeResources(self._cost) player.giveResources(self._goodsOffered) class InterPlayerTrade(Trade): def __init__(self, cost:"
] |
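# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not from the original module): a minimal run of
# the InterPlayerTrade guard logic above. Resource, ActionError, and Player
# below are hypothetical stand-ins for the real ..extraCode.location,
# ..extraCode.util, and .player modules, which are not shown in this file.
# ---------------------------------------------------------------------------
from enum import Enum


class Resource(Enum):  # stand-in for ..extraCode.location.Resource
    WOOD = "wood"
    BRICK = "brick"


class ActionError(Exception):  # stand-in for ..extraCode.util.ActionError
    pass


class Player:  # stand-in exposing the same resource-accounting methods
    def __init__(self, name):
        self.name = name
        self.resources = {}

    def hasResources(self, wanted):
        return all(self.resources.get(r, 0) >= n for r, n in wanted.items())

    def requireResources(self, wanted):
        if not self.hasResources(wanted):
            raise ActionError(f"{self.name} lacks the required resources")

    def takeResources(self, taken):
        for r, n in taken.items():
            self.resources[r] = self.resources.get(r, 0) - n

    def giveResources(self, given):
        for r, n in given.items():
            self.resources[r] = self.resources.get(r, 0) + n


alice, bob = Player("Alice"), Player("Bob")
alice.resources = {Resource.WOOD: 1}
bob.resources = {Resource.BRICK: 2}
# Alice offers 1 wood for 2 brick; only Bob may accept.
offer = InterPlayerTrade(cost={Resource.BRICK: 2},
                         goodsOffered={Resource.WOOD: 1},
                         offeror=alice, offerees=[bob])
alice.resources[Resource.WOOD] = 0  # Alice loses her wood before Bob accepts
try:
    offer.purchase(bob)
except ActionError as e:
    print(e)  # the deal is flagged invalid by the guard in purchase()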
# LiaungYip/arcflash
# Copyright 2022, <NAME> - https://www.penwatch.net
# Licensed under the MIT License. Refer LICENSE.txt.

import logging
from math import log10, sqrt

from ieee_1584.cubicle import Cubicle
from ieee_1584.tables import table_1, table_3, table_4, table_5


def I_arc_intermediate(c: Cubicle, V_oc: float, I_bf: float):
    # Equation 1
    assert V_oc in (0.6, 2.7, 14.3,)
    k = table_1[(c.EC, V_oc,)]
    x1 = + k["k1"] \
         + k["k2"] * log10(I_bf) \
         + k["k3"] * log10(c.G)
    x2 = + k["k4"] * I_bf ** 6 \
         + k["k5"] * I_bf ** 5 \
         + k["k6"] * I_bf ** 4 \
         + k["k7"] * I_bf ** 3 \
         + k["k8"] * I_bf ** 2 \
         + k["k9"] * I_bf ** 1 \
         + k["k10"]
    I_a = (10 ** x1) * x2
    return I_a


def I_arc_min(c: Cubicle, I_arc: float):
    # Equation 2
    return I_arc * (1 - 0.5 * c.VarCF)


def E_AFB_intermediate(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float,
                       I_arc_600: float = None):
    logging.warning(
        "Function E_AFB_intermediate() is deprecated. "
        "Use intermediate_E() and intermediate_AFB_from_E() instead.")
    E = intermediate_E(c, V_oc, I_arc, I_bf, T, I_arc_600)
    AFB = intermediate_AFB_from_E(c, V_oc, E)
    return E, AFB


def intermediate_E(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float,
                   I_arc_600: float = None):
    # Implements equations 3, 4, 5, 6 for "intermediate incident energy".
    assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,))
    if V_oc <= 0.6:
        k = table_3[c.EC]
    elif V_oc == 2.7:
        k = table_4[c.EC]
    elif V_oc == 14.3:
        k = table_5[c.EC]
    else:
        k = None
    x1 = 12.552 / 50 * T
    x2 = k["k1"] + k["k2"] * log10(c.G)
    if I_arc_600 is None:
        # HV case. Eqs 3, 4, 5
        x3_num = k["k3"] * I_arc
    else:
        # LV case. Eq 6.
        x3_num = k["k3"] * I_arc_600
    x3_den = + k["k4"] * I_bf ** 7 \
             + k["k5"] * I_bf ** 6 \
             + k["k6"] * I_bf ** 5 \
             + k["k7"] * I_bf ** 4 \
             + k["k8"] * I_bf ** 3 \
             + k["k9"] * I_bf ** 2 \
             + k["k10"] * I_bf
    x3 = x3_num / x3_den
    x4 = + k["k11"] * log10(I_bf) \
         + k["k13"] * log10(I_arc) \
         + log10(1 / c.CF)
    x5 = k["k12"] * log10(c.D)
    # Equations 3, 4, 5, 6
    E = x1 * 10 ** (x2 + x3 + x4 + x5)
    assert E >= 0
    return E


def intermediate_AFB_from_E(c: Cubicle, V_oc: float, E: float):
    # Implements equations 7, 8, 9, 10, for "intermediate arc flash boundary", in a simpler way.
    #
    # Calculates the (intermediate) arc flash boundary, i.e. AFB_600, from the incident energy,
    # i.e. E_600, only. Knowledge of T, G, I_arc, I_bf, and CF is not required, as it would be
    # if using Eq's 7, 8, 9, 10 directly. This is useful for multi-time-step calculations where
    # there is no singular value of T, I_arc, or I_bf.
    #
    # Motivation:
    # ===========
    #
    # The IEEE 1584-2018 formulas for arc flash boundary (AFB), i.e. eq's 7, 8, 9, and 10,
    # are pretty complicated.
    #
    # In particular, the equations for AFB require knowledge of time T, busbar gap G,
    # the currents I_arc and I_bf, and size correction factor CF.
    #
    # This is a problem when doing multi-time-step arc flash calculations where the values of
    # T, I_arc, and I_bf are different for each time-step. What single value of I_arc would you
    # plug into Eq 7, when I_arc is 10 kA for 100 ms, then 5 kA for 900 ms, then 2 kA for
    # 1,000 ms?
    #
    # Details:
    # ========
    #
    # Consider Eq 3 for the quantity E_600.
    #
    # First, we recognise that the relationship between incident energy E_600 (J/cm²) and
    # distance D (mm) is simply that __the energy E_600 falls off as a power of the distance D__.
    # If we rearrange Eq 3 using exponent identities we can simplify to:
    #
    #     E_600 = F_600 * ( D ^ k12 )
    #
    # Where:
    # * E_600 is the __intermediate__ arcing energy at V = 0.6 kV, with units of J/cm²,
    # * F_600 is a (reasonably complicated) function of time T, busbar gap G, currents I_arc
    #   and I_bf, and size correction factor CF,
    # * D is the distance (mm) from the fault to where E_600 has been measured,
    # * k12 is a constant __"distance exponent"__ from Table 3, Table 4, or Table 5.
    #
    # We can calculate what F_600** would have been:
    #
    #     F_600 = E_600 / ( D ^ k12 )
    #
    # Once we know the value of F_600, we can calculate E_600' at any distance D' we like:
    #
    #     E_600' (at distance D') = F_600 * ( D' ^ k12 )
    #
    # Alternately, we can calculate the distance D' that will give a particular value of E_600'.
    #
    #     D' = (E_600' / F_600) ^ ( 1 / k12 )
    #
    # Finally note that the arc flash boundary, AFB_600, is simply the special case where E_600'
    # is equal to exactly 1.2 cal/cm². Noting that 1.2 cal/cm² * 4.184 J/cal = 5.0208 J/cm²,
    #
    #     AFB_600 = (5.0208 / F_600) ^ ( 1 / k12 )
    #
    # ** Sidenote: The physical meaning of the quantity "F_600" is that F_600 is, in some sense,
    # the total amount of energy released (i.e. Joules). (The distribution of energy is not
    # isotropic, i.e. k12 != -2.00, so this interpretation is not exact.)
    #
    # Sidenote 2: the funny number "50/12.552" in Eq 3/4/5/6 turns into the magic number 20
    # in Eq 7/8/9/10.
    #     1.2 cal/cm² × 4.184 J/cal = 5.0208 J/cm²
    #     50 / 12.552 * 5.0208 = 20 (exact)
    assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,))
    if V_oc <= 0.6:
        k = table_3[c.EC]
    elif V_oc == 2.7:
        k = table_4[c.EC]
    elif V_oc == 14.3:
        k = table_5[c.EC]
    else:
        k = None
    # After all the explanation, calculation of the (intermediate) AFB is simply 2 lines.
    F = E / (c.D ** k["k12"])
    AFB = (5.0208 / F) ** (1 / k["k12"])
    assert AFB >= 0
    return AFB
"6 for \"intermediate incident energy\". assert (V_oc <= 0.6) or (V_oc in (0.6,",
"CF, # * D is the distance (mm) from the fault to where",
"we know the value of F_600, we can calculate E_600' at any distance",
"x_14300 # Eq 18, Eq 21, Eq 24 x3 = ((x1 * (2.7",
"** 4 \\ + k[\"k7\"] * I_bf ** 3 \\ + k[\"k8\"] *",
"for multi-time-step calculations where there is no singular value of T, I_arc, or",
"2 \\ + k[\"k10\"] * I_bf x3 = x3_num / x3_den x4 =",
"F_600, we can calculate E_600' at any distance D' we like: # #",
"/ F) ** (1 / k[\"k12\"]) assert AFB >= 0 return AFB def",
"(intermediate) AFB is simply 2 lines. F = E / (c.D ** k[\"k12\"])",
"/ k12 ) # # Finally note that the arc flash boundary, AFB_600,",
"7, when I_arc is 10 kA for 100 ms, # then 5 kA",
"of time T, busbar gap G, currents I_arc and I_bf, and size #",
"each time-step. What single value of I_arc would you plug into Eq 7,",
"D' = (E_600' / F_600) ^ ( 1 / k12 ) # #",
"incident energy E_600 (J/cm²) and distance D (mm) is simply # that __the",
"+ k[\"k5\"] * I_bf ** 6 \\ + k[\"k6\"] * I_bf ** 5",
"ms, # then 5 kA for 900 ms, then 2 kA for 1,000",
"are pretty complicated. # # In particular, the equations for AFB requires knowledge",
"different for each time-step. What single value of I_arc would you plug into",
"= c.V_oc x1 = (0.6 / V_oc) ** 2 x2 = 1 /",
"k[\"k3\"] * I_arc_600 x3_den = + k[\"k4\"] * I_bf ** 7 \\ +",
"we can simplify to: # # E_600 = F * ( D ^",
"of # energy released (i.e. Joules). (The distribution of energy is not isotropic,",
"10 directly. # This is useful for multi-time-step calculations where there is no",
"** 2 \\ + k[\"k10\"] * I_bf x3 = x3_num / x3_den x4",
"x1 = + k[\"k1\"] \\ + k[\"k2\"] * log10(I_bf) \\ + k[\"k3\"] *",
"# # Consider Eq 3 for the quantity E_600. # # First, we",
"# 1.2 cal/cm² × 4.184 J/cal = 5.0208 J/cm² # 50 / 12.552",
"4 \\ + k[\"k8\"] * I_bf ** 3 \\ + k[\"k9\"] * I_bf",
"<NAME> - https://www.penwatch.net # Licensed under the MIT License. Refer LICENSE.txt. import logging",
"# interpretation is not exact.) # # Sidenote 2: the funny number \"50/12.552\"",
"to exactly # 1.2 cal/cm². Noting that 1.2 cal/cm² * 4.184 J/cal =",
"x_2700) / 11.6) * (V_oc - 14.3)) + x_14300 # Eq 18, Eq",
"and CF is not required, as it would be if using Eq's 7,",
"distance D (mm) is simply # that __the energy E_600 falls off exponentially",
"for 900 ms, then 2 kA for 1,000 ms? # # Details: #",
"would you plug into Eq 7, when I_arc is 10 kA for 100",
"and 10, are pretty complicated. # # In particular, the equations for AFB",
"energy i.e. E_600 only. # Knowledge of T, G, I_arc, I_bf, and CF",
"E_600 = F * ( D ^ k12 ) # # Where: #",
"T: float, I_arc_600: float = None): # Implements equations 3, 4, 5, 6",
"V_oc == 14.3: k = table_5[c.EC] else: k = None x1 = 12.552",
"# Equation 2 return I_arc * (1 - 0.5 * c.VarCF) def E_AFB_intermediate(c:",
"for 100 ms, # then 5 kA for 900 ms, then 2 kA",
"Details: # ======== # # Consider Eq 3 for the quantity E_600. #",
"correction factor CF, # * D is the distance (mm) from the fault",
"# Once we know the value of F_600, we can calculate E_600' at",
"I_a def I_arc_min(c: Cubicle, I_arc: float): # Equation 2 return I_arc * (1",
"* (V_oc - 0.6)) / 2.1) if 0.600 < V_oc <= 2.7: return",
"** 3 \\ + k[\"k8\"] * I_bf ** 2 \\ + k[\"k9\"] *",
") # # Alternately, we can calculate the distance D' that will give",
"# First, we recognise that the relationship between incident energy E_600 (J/cm²) and",
"= intermediate_AFB_from_E(c, V_oc, E) return E, AFB def intermediate_E(c: Cubicle, V_oc: float, I_arc:",
"None): # Implements equations 3, 4, 5, 6 for \"intermediate incident energy\". assert",
"+ k[\"k2\"] * log10(c.G) if I_arc_600 is None: # HV case. Eqs 3,",
"\"intermediate arc flash boundary\", in a simpler way. # # Calculates the (intermediate)",
"In particular, the equations for AFB requires knowledge of time T, busbar gap",
"** 6 \\ + k[\"k5\"] * I_bf ** 5 \\ + k[\"k6\"] *",
"the relationship between incident energy E_600 (J/cm²) and distance D (mm) is simply",
"float, I_bf: float, T: float, I_arc_600: float = None): logging.warning( \"Function E_AFB_intermediate() is",
"log10(c.G) if I_arc_600 is None: # HV case. Eqs 3, 4, 5 x3_num",
"* I_bf ** 2 \\ + k[\"k9\"] * I_bf ** 1 \\ +",
"3 \\ + k[\"k9\"] * I_bf ** 2 \\ + k[\"k10\"] * I_bf",
"magic number 20 in Eq 7/8/9/10. # 1.2 cal/cm² × 4.184 J/cal =",
"(I_arc_600 ** 2) x3 = (0.6 ** 2 - V_oc ** 2) /",
"V_oc = c.V_oc # Eq 16, Eq 19, Eq 22 x1 = (((x_2700",
"x2 = k[\"k1\"] + k[\"k2\"] * log10(c.G) if I_arc_600 is None: # HV",
"multi-time-step arc flash calculations where the values of T, I_arc, and I_bf are",
"interpretation is not exact.) # # Sidenote 2: the funny number \"50/12.552\" in",
"E_600 / ( D ^ k12 ) # # Once we know the",
"V = 0.6 kV, with units of J/cm², # * F_600 is a",
"1 / k12 ) # # Finally note that the arc flash boundary,",
"k[\"k7\"] * I_bf ** 4 \\ + k[\"k8\"] * I_bf ** 3 \\",
"= 0.6 kV, with units of J/cm², # * F_600 is a (reasonably",
"table_4, table_5 def I_arc_intermediate(c: Cubicle, V_oc: float, I_bf: float): # Equation 1 assert",
"I_bf ** 5 \\ + k[\"k7\"] * I_bf ** 4 \\ + k[\"k8\"]",
"# In particular, the equations for AFB requires knowledge of time T, busbar",
"log10(c.D) # Equations 3, 4, 5, 6 E = x1 * 10 **",
"Eq 7, when I_arc is 10 kA for 100 ms, # then 5",
"* F_600 is a (reasonably complicated) function of time T, busbar gap G,",
"E_600'. # # D' = (E_600' / F_600) ^ ( 1 / k12",
"* I_bf ** 3 \\ + k[\"k8\"] * I_bf ** 2 \\ +",
"= intermediate_E(c, V_oc, I_arc, I_bf, T, I_arc_600) AFB = intermediate_AFB_from_E(c, V_oc, E) return",
"simply the special case where E_600' is equal to exactly # 1.2 cal/cm².",
"= x3_num / x3_den x4 = + k[\"k11\"] * log10(I_bf) \\ + k[\"k13\"]",
"* I_bf ** 6 \\ + k[\"k6\"] * I_bf ** 5 \\ +",
"= None x1 = 12.552 / 50 * T x2 = k[\"k1\"] +",
"k12 ) # # Alternately, we can calculate the distance D' that will",
"1 / (I_arc_600 ** 2) x3 = (0.6 ** 2 - V_oc **",
"that 1.2 cal/cm² * 4.184 J/cal = 5.0208 J/cm², # # AFB_600 =",
"Alternately, we can calculate the distance D' that will give a particular value",
"= ((x1 * (2.7 - V_oc)) / 2.1) + ((x2 * (V_oc -",
"\\ + k[\"k6\"] * I_bf ** 5 \\ + k[\"k7\"] * I_bf **",
"2 lines. F = E / (c.D ** k[\"k12\"]) AFB = (5.0208 /",
"I_arc_600 is None: # HV case. Eqs 3, 4, 5 x3_num = k[\"k3\"]",
"logging from math import log10, sqrt from ieee_1584.cubicle import Cubicle from ieee_1584.tables import",
"or (V_oc in (0.6, 2.7, 14.3,)) if V_oc <= 0.6: k = table_3[c.EC]",
"8, 9, 10, for \"intermediate arc flash boundary\", in a simpler way. #",
"\"intermediate incident energy\". assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,))",
"x3 + x4 + x5) assert E >= 0 return E def intermediate_AFB_from_E(c:",
"equations 7, 8, 9, 10, for \"intermediate arc flash boundary\", in a simpler",
"D is the distance (mm) from the fault to where E_600 has been",
"AFB >= 0 return AFB def interpolate(c: Cubicle, x_600, x_2700, x_14300): V_oc =",
"G, the currents I_arc and I_bf, # and size correction factor CF. #",
"ms? # # Details: # ======== # # Consider Eq 3 for the",
"is None: # HV case. Eqs 3, 4, 5 x3_num = k[\"k3\"] *",
"float = None): logging.warning( \"Function E_AFB_intermediate() is deprecated. Use intermediate_E() and intermediate_AFB_from_E() instead.\")",
"arc flash boundary, i.e. AFB_600, from the incident energy i.e. E_600 only. #",
"log10(1 / c.CF) x5 = k[\"k12\"] * log10(c.D) # Equations 3, 4, 5,",
"= E_600 / ( D ^ k12 ) # # Once we know",
"** Sidenote: The physical meaning of the quantity \"F_600\" is that F_600 is,",
"I_bf ** 5 \\ + k[\"k6\"] * I_bf ** 4 \\ + k[\"k7\"]",
"3 \\ + k[\"k8\"] * I_bf ** 2 \\ + k[\"k9\"] * I_bf",
"* log10(c.G) x2 = + k[\"k4\"] * I_bf ** 6 \\ + k[\"k5\"]",
"know the value of F_600, we can calculate E_600' at any distance D'",
"+ x3 + x4 + x5) assert E >= 0 return E def",
"E = intermediate_E(c, V_oc, I_arc, I_bf, T, I_arc_600) AFB = intermediate_AFB_from_E(c, V_oc, E)",
"# * E_600 is the __intermediate__ arcing energy at V = 0.6 kV,",
"x3 elif V_oc > 2.7: return x2 def I_arc_final_LV(c: Cubicle, I_arc_600, I_bf): #",
"I_arc_600: float = None): logging.warning( \"Function E_AFB_intermediate() is deprecated. Use intermediate_E() and intermediate_AFB_from_E()",
"1,000 ms? # # Details: # ======== # # Consider Eq 3 for",
"time T, busbar gap G, the currents I_arc and I_bf, # and size",
"# 50 / 12.552 * 5.0208 = 20 (exact) assert (V_oc <= 0.6)",
"((x2 * (V_oc - 0.6)) / 2.1) if 0.600 < V_oc <= 2.7:",
"+ k[\"k7\"] * I_bf ** 4 \\ + k[\"k8\"] * I_bf ** 3",
"/ (I_arc_600 ** 2) x3 = (0.6 ** 2 - V_oc ** 2)",
"not required, as it would be if using Eq's 7, 8, 9, 10",
"arc flash boundary, AFB_600, is simply the special case where E_600' is equal",
"k12 != -2.00, so this # interpretation is not exact.) # # Sidenote",
"= (((x_2700 - x_600) / 2.1) * (V_oc - 2.7)) + x_2700 #",
"falls off exponentially with distance D__. If we rearrange Eq 3 using exponent",
"AFB_600, from the incident energy i.e. E_600 only. # Knowledge of T, G,",
"(exact) assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,)) if V_oc",
"(V_oc - 0.6)) / 2.1) if 0.600 < V_oc <= 2.7: return x3",
"are # different for each time-step. What single value of I_arc would you",
"= (5.0208 / F) ** (1 / k[\"k12\"]) assert AFB >= 0 return",
"Eq 20, Eq 23 x2 = (((x_14300 - x_2700) / 11.6) * (V_oc",
"is a (reasonably complicated) function of time T, busbar gap G, currents I_arc",
"is that F_600 is, in some sense, the total amount of # energy",
"We can calculate what F_600** would have been: # # F_600 = E_600",
"the magic number 20 in Eq 7/8/9/10. # 1.2 cal/cm² × 4.184 J/cal",
"Equation 1 assert V_oc in (0.6, 2.7, 14.3,) k = table_1[(c.EC, V_oc,)] x1",
"import Cubicle from ieee_1584.tables import table_1, table_3, table_4, table_5 def I_arc_intermediate(c: Cubicle, V_oc:",
"https://www.penwatch.net # Licensed under the MIT License. Refer LICENSE.txt. import logging from math",
"(1 / k[\"k12\"]) assert AFB >= 0 return AFB def interpolate(c: Cubicle, x_600,",
"x3 = ((x1 * (2.7 - V_oc)) / 2.1) + ((x2 * (V_oc",
"x2 return I_a def I_arc_min(c: Cubicle, I_arc: float): # Equation 2 return I_arc",
"energy\". assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,)) if V_oc",
"# HV case. Eqs 3, 4, 5 x3_num = k[\"k3\"] * I_arc else:",
"= (10 ** x1) * x2 return I_a def I_arc_min(c: Cubicle, I_arc: float):",
"k[\"k8\"] * I_bf ** 2 \\ + k[\"k9\"] * I_bf ** 1 \\",
"** 1 \\ + k[\"k10\"] I_a = (10 ** x1) * x2 return",
"kA for 100 ms, # then 5 kA for 900 ms, then 2",
"E, AFB def intermediate_E(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float,",
"D__. If we rearrange Eq 3 using exponent identities # we can simplify",
"kA for 1,000 ms? # # Details: # ======== # # Consider Eq",
"= (E_600' / F_600) ^ ( 1 / k12 ) # # Finally",
"F = E / (c.D ** k[\"k12\"]) AFB = (5.0208 / F) **",
"14.3)) + x_14300 # Eq 18, Eq 21, Eq 24 x3 = ((x1",
"if I_arc_600 is None: # HV case. Eqs 3, 4, 5 x3_num =",
"G, I_arc, I_bf, and CF is not required, as it would be if",
"Eq 19, Eq 22 x1 = (((x_2700 - x_600) / 2.1) * (V_oc",
"+ k[\"k10\"] I_a = (10 ** x1) * x2 return I_a def I_arc_min(c:",
"V_oc > 2.7: return x2 def I_arc_final_LV(c: Cubicle, I_arc_600, I_bf): # Equation 25",
"some sense, the total amount of # energy released (i.e. Joules). (The distribution",
"x2 = (((x_14300 - x_2700) / 11.6) * (V_oc - 14.3)) + x_14300",
"pretty complicated. # # In particular, the equations for AFB requires knowledge of",
"k[\"k3\"] * log10(c.G) x2 = + k[\"k4\"] * I_bf ** 6 \\ +",
"E_600 only. # Knowledge of T, G, I_arc, I_bf, and CF is not",
"This is a problem when doing multi-time-step arc flash calculations where the values",
"18, Eq 21, Eq 24 x3 = ((x1 * (2.7 - V_oc)) /",
"\"50/12.552\" in Eq 3/4/5/6 turns into the magic number 20 in Eq 7/8/9/10.",
"3/4/5/6 turns into the magic number 20 in Eq 7/8/9/10. # 1.2 cal/cm²",
"1 assert V_oc in (0.6, 2.7, 14.3,) k = table_1[(c.EC, V_oc,)] x1 =",
"T x2 = k[\"k1\"] + k[\"k2\"] * log10(c.G) if I_arc_600 is None: #",
"This is useful for multi-time-step calculations where there is no singular value of",
"\\ + k[\"k13\"] * log10(I_arc) \\ + log10(1 / c.CF) x5 = k[\"k12\"]",
"Joules). (The distribution of energy is not isotropic, i.e. k12 != -2.00, so",
"If we rearrange Eq 3 using exponent identities # we can simplify to:",
"else: k = None # After all the explanation, calculation of the (intermediate)",
"(2.7 - V_oc)) / 2.1) + ((x2 * (V_oc - 0.6)) / 2.1)",
"c.V_oc # Eq 16, Eq 19, Eq 22 x1 = (((x_2700 - x_600)",
"that __the energy E_600 falls off exponentially with distance D__. If we rearrange",
"distance D') = F_600 * ( D' ^ k12 ) # # Alternately,",
"Equation 2 return I_arc * (1 - 0.5 * c.VarCF) def E_AFB_intermediate(c: Cubicle,",
"0 return E def intermediate_AFB_from_E(c: Cubicle, V_oc: float, E: float): # Implements equations",
"I_bf x3 = x3_num / x3_den x4 = + k[\"k11\"] * log10(I_bf) \\",
"** (1 / k[\"k12\"]) assert AFB >= 0 return AFB def interpolate(c: Cubicle,",
"** 2) / (0.6 ** 2 * I_bf ** 2) x4 = sqrt(x1",
"correction factor CF. # # This is a problem when doing multi-time-step arc",
"** 2 - V_oc ** 2) / (0.6 ** 2 * I_bf **",
"Calculates the (intermediate) arc flash boundary, i.e. AFB_600, from the incident energy i.e.",
"k[\"k12\"] * log10(c.D) # Equations 3, 4, 5, 6 E = x1 *",
"6 E = x1 * 10 ** (x2 + x3 + x4 +",
"k[\"k5\"] * I_bf ** 6 \\ + k[\"k6\"] * I_bf ** 5 \\",
"useful for multi-time-step calculations where there is no singular value of T, I_arc,",
"cal/cm² * 4.184 J/cal = 5.0208 J/cm², # # AFB_600 = (5.0208 /",
") # # ** Sidenote: The physical meaning of the quantity \"F_600\" is",
"17, Eq 20, Eq 23 x2 = (((x_14300 - x_2700) / 11.6) *",
"+ k[\"k13\"] * log10(I_arc) \\ + log10(1 / c.CF) x5 = k[\"k12\"] *",
"LICENSE.txt. import logging from math import log10, sqrt from ieee_1584.cubicle import Cubicle from",
"* I_bf ** 1 \\ + k[\"k10\"] I_a = (10 ** x1) *",
"+ k[\"k10\"] * I_bf x3 = x3_num / x3_den x4 = + k[\"k11\"]",
"E / (c.D ** k[\"k12\"]) AFB = (5.0208 / F) ** (1 /",
"* 10 ** (x2 + x3 + x4 + x5) assert E >=",
"10, are pretty complicated. # # In particular, the equations for AFB requires",
"into Eq 7, when I_arc is 10 kA for 100 ms, # then",
"((x1 * (2.7 - V_oc)) / 2.1) + ((x2 * (V_oc - 0.6))",
"+ x4 + x5) assert E >= 0 return E def intermediate_AFB_from_E(c: Cubicle,",
") # # Once we know the value of F_600, we can calculate",
"equations for AFB requires knowledge of time T, busbar gap G, the currents",
"I_bf are # different for each time-step. What single value of I_arc would",
"+ k[\"k8\"] * I_bf ** 3 \\ + k[\"k9\"] * I_bf ** 2",
"V_oc <= 0.6: k = table_3[c.EC] elif V_oc == 2.7: k = table_4[c.EC]",
"T, busbar gap G, currents I_arc and I_bf, and size # correction factor",
"25 V_oc = c.V_oc x1 = (0.6 / V_oc) ** 2 x2 =",
"** 2) x3 = (0.6 ** 2 - V_oc ** 2) / (0.6",
"distance D' that will give a particular value of E_600'. # # D'",
"V_oc = c.V_oc x1 = (0.6 / V_oc) ** 2 x2 = 1",
"/ F_600) ^ ( 1 / k12 ) # # Finally note that",
"a constant __\"distance exponent\"__ from Table 3, Table 4, or Table 5. #",
"I_arc, I_bf, T, I_arc_600) AFB = intermediate_AFB_from_E(c, V_oc, E) return E, AFB def",
"in (0.6, 2.7, 14.3,) k = table_1[(c.EC, V_oc,)] x1 = + k[\"k1\"] \\",
"of time T, busbar gap G, the currents I_arc and I_bf, # and",
"simplify to: # # E_600 = F * ( D ^ k12 )",
"x4 = + k[\"k11\"] * log10(I_bf) \\ + k[\"k13\"] * log10(I_arc) \\ +",
"AFB is simply 2 lines. F = E / (c.D ** k[\"k12\"]) AFB",
"- V_oc)) / 2.1) + ((x2 * (V_oc - 0.6)) / 2.1) if",
"# F_600 = E_600 / ( D ^ k12 ) # # Once",
"Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float, I_arc_600: float = None):",
"units of J/cm², # * F_600 is a (reasonably complicated) function of time",
"/ k[\"k12\"]) assert AFB >= 0 return AFB def interpolate(c: Cubicle, x_600, x_2700,",
"IEEE 1584-2018 formulas for arc flash boundary (AFB), i.e. eq's 7, 8, 9,",
"D' that will give a particular value of E_600'. # # D' =",
"V_oc <= 2.7: return x3 elif V_oc > 2.7: return x2 def I_arc_final_LV(c:",
"2 \\ + k[\"k9\"] * I_bf ** 1 \\ + k[\"k10\"] I_a =",
"* k12 is a constant __\"distance exponent\"__ from Table 3, Table 4, or",
"x3_num = k[\"k3\"] * I_arc else: # LV case. Eq 6. x3_num =",
"the equations for AFB requires knowledge of time T, busbar gap G, the",
"E_600' (at distance D') = F_600 * ( D' ^ k12 ) #",
"14.3,)) if V_oc <= 0.6: k = table_3[c.EC] elif V_oc == 2.7: k",
"J/cm² # 50 / 12.552 * 5.0208 = 20 (exact) assert (V_oc <=",
"k[\"k13\"] * log10(I_arc) \\ + log10(1 / c.CF) x5 = k[\"k12\"] * log10(c.D)",
"= F * ( D ^ k12 ) # # Where: # *",
"return I_arc * (1 - 0.5 * c.VarCF) def E_AFB_intermediate(c: Cubicle, V_oc: float,",
"the funny number \"50/12.552\" in Eq 3/4/5/6 turns into the magic number 20",
"incident energy\". assert (V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,)) if",
"^ k12 ) # # Alternately, we can calculate the distance D' that",
"I_arc_600 x3_den = + k[\"k4\"] * I_bf ** 7 \\ + k[\"k5\"] *",
"(V_oc <= 0.6) or (V_oc in (0.6, 2.7, 14.3,)) if V_oc <= 0.6:",
"size correction factor CF. # # This is a problem when doing multi-time-step",
"1.2 cal/cm² × 4.184 J/cal = 5.0208 J/cm² # 50 / 12.552 *",
"def intermediate_E(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float, I_arc_600: float",
"particular, the equations for AFB requires knowledge of time T, busbar gap G,",
"3, 4, 5, 6 for \"intermediate incident energy\". assert (V_oc <= 0.6) or",
"singular value of T, I_arc, or I_bf. # # Motivation: # =========== #",
"11.6) * (V_oc - 14.3)) + x_14300 # Eq 18, Eq 21, Eq",
"= (0.6 / V_oc) ** 2 x2 = 1 / (I_arc_600 ** 2)",
"of energy is not isotropic, i.e. k12 != -2.00, so this # interpretation",
"= None): # Implements equations 3, 4, 5, 6 for \"intermediate incident energy\".",
"/ k12 ) # # ** Sidenote: The physical meaning of the quantity",
"elif V_oc == 14.3: k = table_5[c.EC] else: k = None # After",
"def E_AFB_intermediate(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float, I_arc_600: float",
"0.6) or (V_oc in (0.6, 2.7, 14.3,)) if V_oc <= 0.6: k =",
"900 ms, then 2 kA for 1,000 ms? # # Details: # ========",
"if 0.600 < V_oc <= 2.7: return x3 elif V_oc > 2.7: return",
"arc flash calculations where the values of T, I_arc, and I_bf are #",
"only. # Knowledge of T, G, I_arc, I_bf, and CF is not required,",
"the currents I_arc and I_bf, # and size correction factor CF. # #",
"(The distribution of energy is not isotropic, i.e. k12 != -2.00, so this",
"I_arc and I_bf, # and size correction factor CF. # # This is",
"F_600 = E_600 / ( D ^ k12 ) # # Once we",
"the total amount of # energy released (i.e. Joules). (The distribution of energy",
"6. x3_num = k[\"k3\"] * I_arc_600 x3_den = + k[\"k4\"] * I_bf **",
"from the fault to where E_600 has been measured. # * k12 is",
"__\"distance exponent\"__ from Table 3, Table 4, or Table 5. # # We",
"* 4.184 J/cal = 5.0208 J/cm², # # AFB_600 = (5.0208 / F_600)",
"is useful for multi-time-step calculations where there is no singular value of T,",
"x_600, x_2700, x_14300): V_oc = c.V_oc # Eq 16, Eq 19, Eq 22",
"= table_1[(c.EC, V_oc,)] x1 = + k[\"k1\"] \\ + k[\"k2\"] * log10(I_bf) \\",
"= + k[\"k11\"] * log10(I_bf) \\ + k[\"k13\"] * log10(I_arc) \\ + log10(1",
"float): # Implements equations 7, 8, 9, 10, for \"intermediate arc flash boundary\",",
"The IEEE 1584-2018 formulas for arc flash boundary (AFB), i.e. eq's 7, 8,",
"2: the funny number \"50/12.552\" in Eq 3/4/5/6 turns into the magic number",
"instead.\") E = intermediate_E(c, V_oc, I_arc, I_bf, T, I_arc_600) AFB = intermediate_AFB_from_E(c, V_oc,",
"# Details: # ======== # # Consider Eq 3 for the quantity E_600.",
"<= 2.7: return x3 elif V_oc > 2.7: return x2 def I_arc_final_LV(c: Cubicle,",
"or Table 5. # # We can calculate what F_600** would have been:",
"= table_4[c.EC] elif V_oc == 14.3: k = table_5[c.EC] else: k = None",
"+ k[\"k11\"] * log10(I_bf) \\ + k[\"k13\"] * log10(I_arc) \\ + log10(1 /",
"+ k[\"k9\"] * I_bf ** 1 \\ + k[\"k10\"] I_a = (10 **",
"k12 ) # # Where: # * E_600 is the __intermediate__ arcing energy",
"E_600. # # First, we recognise that the relationship between incident energy E_600",
"3 for the quantity E_600. # # First, we recognise that the relationship",
"way. # # Calculates the (intermediate) arc flash boundary, i.e. AFB_600, from the",
"for AFB requires knowledge of time T, busbar gap G, the currents I_arc",
"First, we recognise that the relationship between incident energy E_600 (J/cm²) and distance",
"!= -2.00, so this # interpretation is not exact.) # # Sidenote 2:",
"# Eq 18, Eq 21, Eq 24 x3 = ((x1 * (2.7 -",
"** 2 x2 = 1 / (I_arc_600 ** 2) x3 = (0.6 **",
"of the quantity \"F_600\" is that F_600 is, in some sense, the total",
"plug into Eq 7, when I_arc is 10 kA for 100 ms, #",
"(((x_14300 - x_2700) / 11.6) * (V_oc - 14.3)) + x_14300 # Eq",
"float, I_bf: float, T: float, I_arc_600: float = None): # Implements equations 3,",
"2.7: k = table_4[c.EC] elif V_oc == 14.3: k = table_5[c.EC] else: k",
"k[\"k2\"] * log10(c.G) if I_arc_600 is None: # HV case. Eqs 3, 4,",
"i.e. eq's 7, 8, 9, and 10, are pretty complicated. # # In",
") # # Where: # * E_600 is the __intermediate__ arcing energy at",
"has been measured. # * k12 is a constant __\"distance exponent\"__ from Table",
"intermediate_AFB_from_E(c, V_oc, E) return E, AFB def intermediate_E(c: Cubicle, V_oc: float, I_arc: float,",
"the incident energy i.e. E_600 only. # Knowledge of T, G, I_arc, I_bf,",
"incident energy i.e. E_600 only. # Knowledge of T, G, I_arc, I_bf, and",
"like: # # E_600' (at distance D') = F_600 * ( D' ^",
"# that __the energy E_600 falls off exponentially with distance D__. If we",
"(E_600' / F_600) ^ ( 1 / k12 ) # # Finally note",
"Noting that 1.2 cal/cm² * 4.184 J/cal = 5.0208 J/cm², # # AFB_600",
"4, 5 x3_num = k[\"k3\"] * I_arc else: # LV case. Eq 6.",
"required, as it would be if using Eq's 7, 8, 9, 10 directly.",
"ms, then 2 kA for 1,000 ms? # # Details: # ======== #",
"float = None): # Implements equations 3, 4, 5, 6 for \"intermediate incident",
"is no singular value of T, I_arc, or I_bf. # # Motivation: #",
"x3 = (0.6 ** 2 - V_oc ** 2) / (0.6 ** 2",
"/ ( D ^ k12 ) # # Once we know the value",
"physical meaning of the quantity \"F_600\" is that F_600 is, in some sense,",
"I_bf: float, T: float, I_arc_600: float = None): logging.warning( \"Function E_AFB_intermediate() is deprecated.",
"= None): logging.warning( \"Function E_AFB_intermediate() is deprecated. Use intermediate_E() and intermediate_AFB_from_E() instead.\") E",
"\\ + log10(1 / c.CF) x5 = k[\"k12\"] * log10(c.D) # Equations 3,",
"so this # interpretation is not exact.) # # Sidenote 2: the funny",
"or I_bf. # # Motivation: # =========== # # The IEEE 1584-2018 formulas",
"( D ^ k12 ) # # Where: # * E_600 is the",
"+ k[\"k6\"] * I_bf ** 4 \\ + k[\"k7\"] * I_bf ** 3",
"(mm) from the fault to where E_600 has been measured. # * k12",
"D ^ k12 ) # # Once we know the value of F_600,",
"calculate E_600' at any distance D' we like: # # E_600' (at distance",
">= 0 return AFB def interpolate(c: Cubicle, x_600, x_2700, x_14300): V_oc = c.V_oc",
"k = None # After all the explanation, calculation of the (intermediate) AFB",
"float, I_bf: float): # Equation 1 assert V_oc in (0.6, 2.7, 14.3,) k",
"/ 11.6) * (V_oc - 14.3)) + x_14300 # Eq 18, Eq 21,",
"Consider Eq 3 for the quantity E_600. # # First, we recognise that",
"I_arc_min(c: Cubicle, I_arc: float): # Equation 2 return I_arc * (1 - 0.5",
"table_4[c.EC] elif V_oc == 14.3: k = table_5[c.EC] else: k = None x1",
"Sidenote 2: the funny number \"50/12.552\" in Eq 3/4/5/6 turns into the magic",
"F_600** would have been: # # F_600 = E_600 / ( D ^",
"single value of I_arc would you plug into Eq 7, when I_arc is",
"for arc flash boundary (AFB), i.e. eq's 7, 8, 9, and 10, are",
"\\ + k[\"k2\"] * log10(I_bf) \\ + k[\"k3\"] * log10(c.G) x2 = +",
"off exponentially with distance D__. If we rearrange Eq 3 using exponent identities",
"in (0.6, 2.7, 14.3,)) if V_oc <= 0.6: k = table_3[c.EC] elif V_oc",
"x3_num / x3_den x4 = + k[\"k11\"] * log10(I_bf) \\ + k[\"k13\"] *",
"I_bf ** 3 \\ + k[\"k9\"] * I_bf ** 2 \\ + k[\"k10\"]",
"return I_a def I_arc_min(c: Cubicle, I_arc: float): # Equation 2 return I_arc *",
"with distance D__. If we rearrange Eq 3 using exponent identities # we",
"5, 6 for \"intermediate incident energy\". assert (V_oc <= 0.6) or (V_oc in",
"k[\"k7\"] * I_bf ** 3 \\ + k[\"k8\"] * I_bf ** 2 \\",
"I_arc, I_bf, and CF is not required, as it would be if using",
"value of T, I_arc, or I_bf. # # Motivation: # =========== # #",
"I_bf, and size # correction factor CF, # * D is the distance",
"I_arc is 10 kA for 100 ms, # then 5 kA for 900",
"all the explanation, calculation of the (intermediate) AFB is simply 2 lines. F",
"0.6)) / 2.1) if 0.600 < V_oc <= 2.7: return x3 elif V_oc",
"I_bf ** 4 \\ + k[\"k8\"] * I_bf ** 3 \\ + k[\"k9\"]",
"k = table_5[c.EC] else: k = None # After all the explanation, calculation",
"* log10(I_bf) \\ + k[\"k3\"] * log10(c.G) x2 = + k[\"k4\"] * I_bf",
"float, I_arc_600: float = None): # Implements equations 3, 4, 5, 6 for",
"Motivation: # =========== # # The IEEE 1584-2018 formulas for arc flash boundary",
"* I_bf ** 3 \\ + k[\"k9\"] * I_bf ** 2 \\ +",
"I_bf ** 2 \\ + k[\"k10\"] * I_bf x3 = x3_num / x3_den",
"doing multi-time-step arc flash calculations where the values of T, I_arc, and I_bf",
"Eq 3 using exponent identities # we can simplify to: # # E_600",
"busbar gap G, currents I_arc and I_bf, and size # correction factor CF,",
"8, 9, 10 directly. # This is useful for multi-time-step calculations where there",
"= 12.552 / 50 * T x2 = k[\"k1\"] + k[\"k2\"] * log10(c.G)",
"* I_bf ** 2 \\ + k[\"k10\"] * I_bf x3 = x3_num /",
"k[\"k2\"] * log10(I_bf) \\ + k[\"k3\"] * log10(c.G) x2 = + k[\"k4\"] *",
"it would be if using Eq's 7, 8, 9, 10 directly. # This",
"- 0.5 * c.VarCF) def E_AFB_intermediate(c: Cubicle, V_oc: float, I_arc: float, I_bf: float,",
"<= 0.6) or (V_oc in (0.6, 2.7, 14.3,)) if V_oc <= 0.6: k",
"I_arc else: # LV case. Eq 6. x3_num = k[\"k3\"] * I_arc_600 x3_den",
"is simply # that __the energy E_600 falls off exponentially with distance D__.",
"# * F_600 is a (reasonably complicated) function of time T, busbar gap",
"constant __\"distance exponent\"__ from Table 3, Table 4, or Table 5. # #",
"x4 + x5) assert E >= 0 return E def intermediate_AFB_from_E(c: Cubicle, V_oc:",
"6 \\ + k[\"k6\"] * I_bf ** 5 \\ + k[\"k7\"] * I_bf",
"# # E_600' (at distance D') = F_600 * ( D' ^ k12",
"where E_600' is equal to exactly # 1.2 cal/cm². Noting that 1.2 cal/cm²",
"intermediate_AFB_from_E(c: Cubicle, V_oc: float, E: float): # Implements equations 7, 8, 9, 10,",
"i.e. AFB_600, from the incident energy i.e. E_600 only. # Knowledge of T,",
"I_arc: float, I_bf: float, T: float, I_arc_600: float = None): # Implements equations",
"# Implements equations 3, 4, 5, 6 for \"intermediate incident energy\". assert (V_oc",
"# # E_600 = F * ( D ^ k12 ) # #",
"+ x_14300 # Eq 18, Eq 21, Eq 24 x3 = ((x1 *",
"V_oc == 2.7: k = table_4[c.EC] elif V_oc == 14.3: k = table_5[c.EC]",
"/ V_oc) ** 2 x2 = 1 / (I_arc_600 ** 2) x3 =",
"between incident energy E_600 (J/cm²) and distance D (mm) is simply # that",
"Eq 3 for the quantity E_600. # # First, we recognise that the",
"# we can simplify to: # # E_600 = F * ( D",
"2.1) if 0.600 < V_oc <= 2.7: return x3 elif V_oc > 2.7:",
"\\ + k[\"k6\"] * I_bf ** 4 \\ + k[\"k7\"] * I_bf **",
"* I_bf ** 7 \\ + k[\"k5\"] * I_bf ** 6 \\ +",
"Cubicle from ieee_1584.tables import table_1, table_3, table_4, table_5 def I_arc_intermediate(c: Cubicle, V_oc: float,",
"elif V_oc == 2.7: k = table_4[c.EC] elif V_oc == 14.3: k =",
"in some sense, the total amount of # energy released (i.e. Joules). (The",
"table_3[c.EC] elif V_oc == 2.7: k = table_4[c.EC] elif V_oc == 14.3: k",
"intermediate_E() and intermediate_AFB_from_E() instead.\") E = intermediate_E(c, V_oc, I_arc, I_bf, T, I_arc_600) AFB",
"multi-time-step calculations where there is no singular value of T, I_arc, or I_bf.",
"return E def intermediate_AFB_from_E(c: Cubicle, V_oc: float, E: float): # Implements equations 7,",
"that will give a particular value of E_600'. # # D' = (E_600'",
"k[\"k10\"] I_a = (10 ** x1) * x2 return I_a def I_arc_min(c: Cubicle,",
"and I_bf, and size # correction factor CF, # * D is the",
"released (i.e. Joules). (The distribution of energy is not isotropic, i.e. k12 !=",
"^ ( 1 / k12 ) # # Finally note that the arc",
"boundary (AFB), i.e. eq's 7, 8, 9, and 10, are pretty complicated. #",
"# LV case. Eq 6. x3_num = k[\"k3\"] * I_arc_600 x3_den = +",
"is not exact.) # # Sidenote 2: the funny number \"50/12.552\" in Eq",
"20 in Eq 7/8/9/10. # 1.2 cal/cm² × 4.184 J/cal = 5.0208 J/cm²",
"complicated) function of time T, busbar gap G, currents I_arc and I_bf, and",
"of T, G, I_arc, I_bf, and CF is not required, as it would",
"( 1 / k12 ) # # ** Sidenote: The physical meaning of",
"== 2.7: k = table_4[c.EC] elif V_oc == 14.3: k = table_5[c.EC] else:",
"from Table 3, Table 4, or Table 5. # # We can calculate",
"5 x3_num = k[\"k3\"] * I_arc else: # LV case. Eq 6. x3_num",
"- V_oc ** 2) / (0.6 ** 2 * I_bf ** 2) x4",
"particular value of E_600'. # # D' = (E_600' / F_600) ^ (",
"is the __intermediate__ arcing energy at V = 0.6 kV, with units of",
"factor CF, # * D is the distance (mm) from the fault to",
"# E_600 = F * ( D ^ k12 ) # # Where:",
"x3_den x4 = + k[\"k11\"] * log10(I_bf) \\ + k[\"k13\"] * log10(I_arc) \\",
"def intermediate_AFB_from_E(c: Cubicle, V_oc: float, E: float): # Implements equations 7, 8, 9,",
"calculations where there is no singular value of T, I_arc, or I_bf. #",
"+ k[\"k9\"] * I_bf ** 2 \\ + k[\"k10\"] * I_bf x3 =",
"* 5.0208 = 20 (exact) assert (V_oc <= 0.6) or (V_oc in (0.6,",
"# We can calculate what F_600** would have been: # # F_600 =",
"License. Refer LICENSE.txt. import logging from math import log10, sqrt from ieee_1584.cubicle import",
"** (x2 + x3 + x4 + x5) assert E >= 0 return",
"is a constant __\"distance exponent\"__ from Table 3, Table 4, or Table 5.",
"the __intermediate__ arcing energy at V = 0.6 kV, with units of J/cm²,",
"F * ( D ^ k12 ) # # Where: # * E_600",
"V_oc) ** 2 x2 = 1 / (I_arc_600 ** 2) x3 = (0.6",
"simply # that __the energy E_600 falls off exponentially with distance D__. If",
"Eqs 3, 4, 5 x3_num = k[\"k3\"] * I_arc else: # LV case.",
"directly. # This is useful for multi-time-step calculations where there is no singular",
"# Finally note that the arc flash boundary, AFB_600, is simply the special",
"Knowledge of T, G, I_arc, I_bf, and CF is not required, as it",
"24 x3 = ((x1 * (2.7 - V_oc)) / 2.1) + ((x2 *",
"and I_bf, # and size correction factor CF. # # This is a",
"J/cm², # # AFB_600 = (5.0208 / F_600) ^ ( 1 / k12",
"(5.0208 / F_600) ^ ( 1 / k12 ) # # ** Sidenote:",
"# This is useful for multi-time-step calculations where there is no singular value",
"I_bf, T, I_arc_600) AFB = intermediate_AFB_from_E(c, V_oc, E) return E, AFB def intermediate_E(c:",
"is the distance (mm) from the fault to where E_600 has been measured.",
"elif V_oc > 2.7: return x2 def I_arc_final_LV(c: Cubicle, I_arc_600, I_bf): # Equation",
"Refer LICENSE.txt. import logging from math import log10, sqrt from ieee_1584.cubicle import Cubicle",
"x5 = k[\"k12\"] * log10(c.D) # Equations 3, 4, 5, 6 E =",
"(0.6, 2.7, 14.3,) k = table_1[(c.EC, V_oc,)] x1 = + k[\"k1\"] \\ +",
"k[\"k3\"] * I_arc else: # LV case. Eq 6. x3_num = k[\"k3\"] *",
"= k[\"k3\"] * I_arc_600 x3_den = + k[\"k4\"] * I_bf ** 7 \\",
"I_arc, or I_bf. # # Motivation: # =========== # # The IEEE 1584-2018",
"* ( D' ^ k12 ) # # Alternately, we can calculate the",
"in Eq 3/4/5/6 turns into the magic number 20 in Eq 7/8/9/10. #",
"calculation of the (intermediate) AFB is simply 2 lines. F = E /",
"2.1) * (V_oc - 2.7)) + x_2700 # Eq 17, Eq 20, Eq",
"* I_bf ** 2) x4 = sqrt(x1 * (x2 - x3)) return 1",
"# ======== # # Consider Eq 3 for the quantity E_600. # #",
"Eq 6. x3_num = k[\"k3\"] * I_arc_600 x3_den = + k[\"k4\"] * I_bf",
"5 kA for 900 ms, then 2 kA for 1,000 ms? # #",
"Where: # * E_600 is the __intermediate__ arcing energy at V = 0.6",
"D (mm) is simply # that __the energy E_600 falls off exponentially with",
"+ k[\"k8\"] * I_bf ** 2 \\ + k[\"k9\"] * I_bf ** 1",
"E_600 is the __intermediate__ arcing energy at V = 0.6 kV, with units",
"(reasonably complicated) function of time T, busbar gap G, currents I_arc and I_bf,",
"7, 8, 9, 10 directly. # This is useful for multi-time-step calculations where",
"* log10(I_bf) \\ + k[\"k13\"] * log10(I_arc) \\ + log10(1 / c.CF) x5",
"(V_oc - 14.3)) + x_14300 # Eq 18, Eq 21, Eq 24 x3",
"in Eq 7/8/9/10. # 1.2 cal/cm² × 4.184 J/cal = 5.0208 J/cm² #",
"I_bf ** 7 \\ + k[\"k5\"] * I_bf ** 6 \\ + k[\"k6\"]",
"the quantity E_600. # # First, we recognise that the relationship between incident",
"x2 def I_arc_final_LV(c: Cubicle, I_arc_600, I_bf): # Equation 25 V_oc = c.V_oc x1",
"5.0208 J/cm² # 50 / 12.552 * 5.0208 = 20 (exact) assert (V_oc",
"= (0.6 ** 2 - V_oc ** 2) / (0.6 ** 2 *",
"ieee_1584.tables import table_1, table_3, table_4, table_5 def I_arc_intermediate(c: Cubicle, V_oc: float, I_bf: float):",
"1.2 cal/cm² * 4.184 J/cal = 5.0208 J/cm², # # AFB_600 = (5.0208",
"k[\"k12\"]) AFB = (5.0208 / F) ** (1 / k[\"k12\"]) assert AFB >=",
"x5) assert E >= 0 return E def intermediate_AFB_from_E(c: Cubicle, V_oc: float, E:",
"deprecated. Use intermediate_E() and intermediate_AFB_from_E() instead.\") E = intermediate_E(c, V_oc, I_arc, I_bf, T,",
"9, 10 directly. # This is useful for multi-time-step calculations where there is",
"under the MIT License. Refer LICENSE.txt. import logging from math import log10, sqrt",
"the (intermediate) arc flash boundary, i.e. AFB_600, from the incident energy i.e. E_600",
"E_600 has been measured. # * k12 is a constant __\"distance exponent\"__ from",
"I_bf. # # Motivation: # =========== # # The IEEE 1584-2018 formulas for",
"V_oc ** 2) / (0.6 ** 2 * I_bf ** 2) x4 =",
"** 3 \\ + k[\"k9\"] * I_bf ** 2 \\ + k[\"k10\"] *",
"/ 2.1) if 0.600 < V_oc <= 2.7: return x3 elif V_oc >",
"* (V_oc - 14.3)) + x_14300 # Eq 18, Eq 21, Eq 24",
"( 1 / k12 ) # # Finally note that the arc flash",
"table_4[c.EC] elif V_oc == 14.3: k = table_5[c.EC] else: k = None #",
"I_bf, and CF is not required, as it would be if using Eq's",
"flash boundary, AFB_600, is simply the special case where E_600' is equal to",
"c.VarCF) def E_AFB_intermediate(c: Cubicle, V_oc: float, I_arc: float, I_bf: float, T: float, I_arc_600:",
"1.2 cal/cm². Noting that 1.2 cal/cm² * 4.184 J/cal = 5.0208 J/cm², #",
"# # Details: # ======== # # Consider Eq 3 for the quantity",
"CF. # # This is a problem when doing multi-time-step arc flash calculations",
"Eq 18, Eq 21, Eq 24 x3 = ((x1 * (2.7 - V_oc))",
"# Copyright 2022, <NAME> - https://www.penwatch.net # Licensed under the MIT License. Refer",
"I_bf ** 6 \\ + k[\"k6\"] * I_bf ** 5 \\ + k[\"k7\"]",
"I_bf): # Equation 25 V_oc = c.V_oc x1 = (0.6 / V_oc) **",
"(V_oc in (0.6, 2.7, 14.3,)) if V_oc <= 0.6: k = table_3[c.EC] elif",
"__intermediate__ arcing energy at V = 0.6 kV, with units of J/cm², #",
"LV case. Eq 6. x3_num = k[\"k3\"] * I_arc_600 x3_den = + k[\"k4\"]",
"Eq 21, Eq 24 x3 = ((x1 * (2.7 - V_oc)) / 2.1)",
"======== # # Consider Eq 3 for the quantity E_600. # # First,",
"is equal to exactly # 1.2 cal/cm². Noting that 1.2 cal/cm² * 4.184",
"AFB = intermediate_AFB_from_E(c, V_oc, E) return E, AFB def intermediate_E(c: Cubicle, V_oc: float,",
"# # F_600 = E_600 / ( D ^ k12 ) # #",
"fault to where E_600 has been measured. # * k12 is a constant",
"special case where E_600' is equal to exactly # 1.2 cal/cm². Noting that",
"1 / k12 ) # # ** Sidenote: The physical meaning of the",
"# # The IEEE 1584-2018 formulas for arc flash boundary (AFB), i.e. eq's",
"4.184 J/cal = 5.0208 J/cm², # # AFB_600 = (5.0208 / F_600) ^",
"I_arc: float): # Equation 2 return I_arc * (1 - 0.5 * c.VarCF)",
"formulas for arc flash boundary (AFB), i.e. eq's 7, 8, 9, and 10,",
"# # Calculates the (intermediate) arc flash boundary, i.e. AFB_600, from the incident",
"0 return AFB def interpolate(c: Cubicle, x_600, x_2700, x_14300): V_oc = c.V_oc #",
"/ 2.1) * (V_oc - 2.7)) + x_2700 # Eq 17, Eq 20,",
"will give a particular value of E_600'. # # D' = (E_600' /",
"funny number \"50/12.552\" in Eq 3/4/5/6 turns into the magic number 20 in",
"float): # Equation 2 return I_arc * (1 - 0.5 * c.VarCF) def",
"Sidenote: The physical meaning of the quantity \"F_600\" is that F_600 is, in",
"Eq 17, Eq 20, Eq 23 x2 = (((x_14300 - x_2700) / 11.6)",
"Eq 7/8/9/10. # 1.2 cal/cm² × 4.184 J/cal = 5.0208 J/cm² # 50",
"i.e. k12 != -2.00, so this # interpretation is not exact.) # #",
"** x1) * x2 return I_a def I_arc_min(c: Cubicle, I_arc: float): # Equation",
"def I_arc_intermediate(c: Cubicle, V_oc: float, I_bf: float): # Equation 1 assert V_oc in",
"k[\"k4\"] * I_bf ** 6 \\ + k[\"k5\"] * I_bf ** 5 \\",
"** 5 \\ + k[\"k6\"] * I_bf ** 4 \\ + k[\"k7\"] *",
"16, Eq 19, Eq 22 x1 = (((x_2700 - x_600) / 2.1) *",
"Eq 22 x1 = (((x_2700 - x_600) / 2.1) * (V_oc - 2.7))",
"we recognise that the relationship between incident energy E_600 (J/cm²) and distance D",
"^ k12 ) # # Where: # * E_600 is the __intermediate__ arcing"
] |
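The comment block in intermediate_AFB_from_E() above motivates splitting the energy and boundary calculations for multi-time-step studies. The sketch below illustrates that usage pattern; it is not part of the package. The Cubicle instance `c`, the helper name multi_step_AFB, the `steps` list of (T, I_arc, I_bf) stages, and the import path ieee_1584.equations are all assumptions.

# Minimal multi-time-step sketch (HV case, V_oc > 0.6 kV), assuming the file
# above is importable as ieee_1584.equations and `c` is a ready-made Cubicle.
from ieee_1584.equations import intermediate_E, intermediate_AFB_from_E

def multi_step_AFB(c, V_oc, steps):
    # Incident energy is additive over time, so sum the contribution of each
    # hypothetical (T, I_arc, I_bf) protection stage at working distance c.D...
    E_total = sum(intermediate_E(c, V_oc, I_arc, I_bf, T)
                  for (T, I_arc, I_bf) in steps)
    # ...then convert the summed energy to a boundary in one call, with no
    # single value of T, I_arc, or I_bf required.
    return E_total, intermediate_AFB_from_E(c, V_oc, E_total)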
# setup.py for the "logware" package.
from setuptools import setup

setup(name='logware',
      version='0.1.4',
      description='Logging middleware for python web services',
      url='https://github.com/Pratilipi-Labs/python-logware',
      author='Giridhar',
      author_email='<EMAIL>',
      license='MIT',
      packages=['logware'],
      install_requires=[
          'webob'
      ],
      zip_safe=False)
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"

import sys
from collections import defaultdict as dd

from nlp_utils import fopen

# Re-computes the token indices recorded in aw_file so that they point into
# the POS sequences of pos_file with all "-NONE-" trace tokens removed.
pos_file = sys.argv[1]
aw_file = sys.argv[2]
TAG = "-NONE-"

aw_lines = fopen(aw_file).readlines()
indices = dd(list)
for line in aw_lines:
    line = line.split()
    line_ind = int(line[2])
    term_ind = int(line[3])
    indices[line_ind].append(term_ind)

whole_ind = []
for i, line in enumerate(fopen(pos_file)):
    line = line.split()
    # Positions of trace tokens in this line.
    remove = []
    for j, t in enumerate(line):
        if t == TAG:
            remove.append(j)
    # For each recorded index, count how many traces precede it.
    m = len(indices[i])
    remove_part = [0] * m
    for j, ind in enumerate(indices[i]):
        rr = 0
        for tag_ind in remove:
            if tag_ind < ind:
                rr += 1
        remove_part[j] = rr
    # Shift each index left by the number of preceding traces.
    result = []
    for ind, rem in zip(indices[i], remove_part):
        #print ind, rem
        result.append(ind - rem)
    whole_ind.extend(result)
    #print ' '.join([str(k) for k in result])

for line, new_index in zip(aw_lines, whole_ind):
    line = line.split()
    line.append(str(new_index))
    print "\t".join(line)
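To make the re-indexing rule above concrete, here is a tiny self-contained check with made-up tokens (none of this comes from the input files): an index is shifted left by the number of "-NONE-" traces occurring before it.

# Hypothetical line: two traces, at positions 1 and 3.
line = ['The', '-NONE-', 'dog', '-NONE-', 'barks']
remove = [j for j, t in enumerate(line) if t == '-NONE-']   # [1, 3]
ind = 4                                  # 'barks' in the original tokens
rr = sum(1 for tag_ind in remove if tag_ind < ind)          # 2 traces precede
assert ind - rr == 2                     # 'barks' once traces are dropped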
# PseudoMarginalData: a samples-plus-interim-prior container, and a no-op
# data manipulator.
import numpy as np

from utils import pick_discrete


class PseudoMarginalData(object):
    def __init__(self, data, interim_prior):
        # Data should have dims [NOBJ, NSAMPLE, NDIM] or [NOBJ, NSAMPLE] if
        # NDIM is 1
        # interim_prior should have dims [NOBJ, NSAMPLE]
        self.data = data
        self.interim_prior = interim_prior
        if self.data.ndim == 2:
            self.nobj, self.nsample = self.data.shape
        else:
            self.nobj, self.nsample, self.ndim = self.data.shape
        if self.interim_prior.shape != (self.nobj, self.nsample):
            ds = self.data.shape
            ips = self.interim_prior.shape
            raise ValueError(
                "data shape [NOBJ, NSAMPLE, NDIM] = {} inconsistent with "
                "interim_prior shape [NOBJ, NSAMPLE] = {}".format(ds, ips))

    def __len__(self):
        return self.nobj

    def __getitem__(self, index):
        import numbers
        cls = type(self)
        # *Leave* a shallow axis in the case a single object is requested.
        if isinstance(index, numbers.Integral):
            return cls(self.data[np.newaxis, index],
                       self.interim_prior[np.newaxis, index])
        else:
            return cls(self.data[index], self.interim_prior[index])

    def random_sample(self):
        """Return a [NOBJ, NDIM] numpy array sampling over NSAMPLE using
        inverse interim_prior weights. Needed to compute a posterior object."""
        ps = 1./self.interim_prior
        ps /= np.sum(ps, axis=1)[:, np.newaxis]
        return np.array([self.data[i, pick_discrete(p)]
                         for i, p in enumerate(ps)])


class NullManip(object):
    def init(self, D):
        pass

    def __call__(self, D):
        return D

    def unmanip(self, D):
        return D

    def update(self, D, phi, c, prior):
        pass
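A short usage sketch of the container above follows, with made-up shapes (5 objects, 100 interim-posterior samples each, 2 dimensions); it assumes the PseudoMarginalData class is in scope and that utils.pick_discrete is importable, as the module requires.

import numpy as np

# Hypothetical inputs: [NOBJ, NSAMPLE, NDIM] samples and [NOBJ, NSAMPLE]
# interim prior values.
data = np.random.normal(size=(5, 100, 2))
interim_prior = np.random.uniform(0.5, 1.5, (5, 100))

pmd = PseudoMarginalData(data, interim_prior)
subset = pmd[2:4]            # slicing keeps the leading [NOBJ, ...] axis
draw = pmd.random_sample()   # one weighted draw per object -> shape (5, 2)
assert len(subset) == 2 and draw.shape == (5, 2)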
# Command-line client for the BitGo wallet API.
import os
import json

from bitgo import BitGo  # assumed import path; only the name BitGo is recoverable


def load_config(filename):
    if os.path.exists(filename):
        try:
            return json.load(open(filename, 'rb'))
        except ValueError:
            return {}
    else:
        return {}


def update_config(filename, cnf):
    config = load_config(filename)
    config.update(cnf)
    json.dump(config, open(filename, "wb"))


def main():
    import getpass
    from optparse import OptionParser
    import sys
    from os.path import expanduser

    home = expanduser("~")
    parser = OptionParser()
    parser.add_option("-a", "--access-token", dest="access_token",
                      help="access token")
    parser.add_option("-w", "--wallet-id", dest="wallet_id",
                      help="wallet id")
    (options, args) = parser.parse_args()
    if len(args) == 0:
        print "a command is required, available: access_token, get_wallets, get_balance, send"
        sys.exit(1)
    action = args[0]
    config_filename = os.path.join(home, '.bitgo')  # assumed location; the original filename is not recoverable here
    config = load_config(config_filename)
    if action == 'access_token':
        # Log in with username/password/otp and fetch a new access token.
        username = raw_input('username: ')
        password = getpass.getpass('password: ')
        otp = raw_input('otp: ')
        bitgo = BitGo()
        access_token = bitgo.get_access_token(username, password, otp)
        print "access_token: ", access_token
        if raw_input("store y/n? ") == "y":
            update_config(config_filename, {'access_token': access_token})
        sys.exit(0)
    if options.access_token:
        access_token = options.access_token
    elif 'access_token' in config:
        access_token = config['access_token']
    else:
        print "the --access-token is a required parameter"
        sys.exit(1)
    bitgo = BitGo(access_token=access_token)
    if action == 'get_wallets':
        print bitgo.get_wallets()
    elif action == 'get_balance':
        if options.wallet_id is None:
            print "option -w {{ wallet_id }} is required for get_balance"
            sys.exit(1)
        print bitgo.get_balance(options.wallet_id) / float(10**8)
    elif action == 'get_wallet':
        if options.wallet_id is None:
            print "option -w {{ wallet_id }} is required for get_wallet"
            sys.exit(1)
        otp = getpass.getpass('otp: ')
        bitgo.unlock(otp)
        print bitgo.get_wallet(options.wallet_id)
    elif action == 'get_unspents':
        if options.wallet_id is None:
            print "option -w {{ wallet_id }} is required for get_unspents"
            sys.exit(1)
        print bitgo.get_unspents(options.wallet_id)
    elif action == 'send':
        if options.wallet_id is None:
            print "option -w {{ wallet_id }} is required for send"
            sys.exit(1)
        if len(args) != 3:
            print "address and amount are required"
            sys.exit(1)
        otp = raw_input('otp: ')
        passcode = getpass.getpass('passcode: ')
        bitgo.unlock(otp)
        # The amount argument is in BTC; convert to satoshi.
        print bitgo.send(options.wallet_id, passcode, args[1],
                         float(args[2]) * 10**8)
    else:
        print "invalid command"


if __name__ == '__main__':
    main()
"from os.path import expanduser home = expanduser(\"~\") parser = OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\",",
".bitgo import BitGo def load_config(filename): if os.path.exists(filename): try: return json.load(open(filename, 'rb')) except ValueError:",
"send\" sys.exit(1) if len(args) != 3: print \"address and amount are required\" sys.exit(1)",
"\"option -w {{ wallet_id }} is required for get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif",
"print bitgo.get_wallet(options.wallet_id) elif action == 'get_unspents': if options.wallet_id is None: print \"option -w",
"= getpass.getpass('otp: ') bitgo.unlock(otp) print bitgo.get_wallet(options.wallet_id) elif action == 'get_unspents': if options.wallet_id is",
"\") == \"y\": update_config(config_filename, {'access_token': access_token}) sys.exit(0) if options.access_token: access_token = options.access_token elif",
"') passcode = getpass.getpass('passcode: ') bitgo.unlock(otp) print bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8)",
"'sserrano' import json import os from .bitgo import BitGo def load_config(filename): if os.path.exists(filename):",
"== 'get_unspents': if options.wallet_id is None: print \"option -w {{ wallet_id }} is",
"def load_config(filename): if os.path.exists(filename): try: return json.load(open(filename, 'rb')) except ValueError: return {} else:",
"parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if action == 'get_wallets': print bitgo.get_wallets() elif action",
"get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif action == 'send': if options.wallet_id is None: print",
"open(filename, \"wb\")) def main(): import getpass from optparse import OptionParser import sys from",
"access_token = options.access_token elif 'access_token' in config: access_token = config['access_token'] else: print \"the",
"'access_token' in config: access_token = config['access_token'] else: print \"the --access-token is a required",
"= BitGo(access_token=access_token) if action == 'get_wallets': print bitgo.get_wallets() elif action == 'get_balance': if",
"config = load_config(filename) config.update(cnf) json.dump(config, open(filename, \"wb\")) def main(): import getpass from optparse",
"}} is required for get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif action == 'send': if",
"try: return json.load(open(filename, 'rb')) except ValueError: return {} else: return {} def update_config(filename,",
"action == 'get_wallet': if options.wallet_id is None: print \"option -w {{ wallet_id }}",
"print bitgo.get_unspents(options.wallet_id) elif action == 'send': if options.wallet_id is None: print \"option -w",
"help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\") (options, args) = parser.parse_args() if len(args)",
"json.load(open(filename, 'rb')) except ValueError: return {} else: return {} def update_config(filename, cnf): config",
"bitgo.get_wallets() elif action == 'get_balance': if options.wallet_id is None: print \"option -w {{",
"parser = OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\")",
"config['access_token'] else: print \"the --access-token is a required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token)",
"return {} else: return {} def update_config(filename, cnf): config = load_config(filename) config.update(cnf) json.dump(config,",
"otp = raw_input('otp: ') bitgo = BitGo() access_token = bitgo.get_access_token(username, password, otp) print",
"sys.exit(1) action = args[0] config_filename = os.path.join(home, \".bitgo\") config = load_config(config_filename) if action",
"= BitGo() access_token = bitgo.get_access_token(username, password, otp) print \"access_token: \", access_token if raw_input(\"store",
"\"option -w {{ wallet_id }} is required for get_wallet\" sys.exit(1) otp = getpass.getpass('otp:",
"import sys from os.path import expanduser home = expanduser(\"~\") parser = OptionParser() parser.add_option(\"-a\",",
"def main(): import getpass from optparse import OptionParser import sys from os.path import",
"== 0: print \"a command is required, available: access_token, get_wallets, get_balance, send\" sys.exit(1)",
"available: access_token, get_wallets, get_balance, send\" sys.exit(1) action = args[0] config_filename = os.path.join(home, \".bitgo\")",
"raw_input('username: ') password = <PASSWORD>('password: ') otp = raw_input('otp: ') bitgo = BitGo()",
"wallet_id }} is required for send\" sys.exit(1) if len(args) != 3: print \"address",
"= config['access_token'] else: print \"the --access-token is a required parameter\" sys.exit(1) bitgo =",
"otp = getpass.getpass('otp: ') bitgo.unlock(otp) print bitgo.get_wallet(options.wallet_id) elif action == 'get_unspents': if options.wallet_id",
"') bitgo.unlock(otp) print bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8) else: print \"invalid command\"",
"wallet_id }} is required for get_wallet\" sys.exit(1) otp = getpass.getpass('otp: ') bitgo.unlock(otp) print",
"}} is required for send\" sys.exit(1) if len(args) != 3: print \"address and",
"print \"access_token: \", access_token if raw_input(\"store y/n? \") == \"y\": update_config(config_filename, {'access_token': access_token})",
"sys.exit(1) print bitgo.get_balance(options.wallet_id) / float(10**8) elif action == 'get_wallet': if options.wallet_id is None:",
"required for get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id) / float(10**8) elif action == 'get_wallet': if",
"get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id) / float(10**8) elif action == 'get_wallet': if options.wallet_id is",
"= load_config(filename) config.update(cnf) json.dump(config, open(filename, \"wb\")) def main(): import getpass from optparse import",
"options.access_token: access_token = options.access_token elif 'access_token' in config: access_token = config['access_token'] else: print",
"'rb')) except ValueError: return {} else: return {} def update_config(filename, cnf): config =",
"\"a command is required, available: access_token, get_wallets, get_balance, send\" sys.exit(1) action = args[0]",
"{{ wallet_id }} is required for get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id) / float(10**8) elif",
"for send\" sys.exit(1) if len(args) != 3: print \"address and amount are required\"",
"= raw_input('otp: ') bitgo = BitGo() access_token = bitgo.get_access_token(username, password, otp) print \"access_token:",
"bitgo.unlock(otp) print bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8) else: print \"invalid command\" if",
"password = <PASSWORD>('password: ') otp = raw_input('otp: ') bitgo = BitGo() access_token =",
"{'access_token': access_token}) sys.exit(0) if options.access_token: access_token = options.access_token elif 'access_token' in config: access_token",
"if options.access_token: access_token = options.access_token elif 'access_token' in config: access_token = config['access_token'] else:",
"a required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if action == 'get_wallets': print bitgo.get_wallets()",
"except ValueError: return {} else: return {} def update_config(filename, cnf): config = load_config(filename)",
"import OptionParser import sys from os.path import expanduser home = expanduser(\"~\") parser =",
"is a required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if action == 'get_wallets': print",
"passcode, args[1], float(args[2]) * 10**8) else: print \"invalid command\" if __name__ == '__main__':",
"help=\"wallet id\") (options, args) = parser.parse_args() if len(args) == 0: print \"a command",
"action == 'get_wallets': print bitgo.get_wallets() elif action == 'get_balance': if options.wallet_id is None:",
"def update_config(filename, cnf): config = load_config(filename) config.update(cnf) json.dump(config, open(filename, \"wb\")) def main(): import",
"') bitgo = BitGo() access_token = bitgo.get_access_token(username, password, otp) print \"access_token: \", access_token",
"= 'sserrano' import json import os from .bitgo import BitGo def load_config(filename): if",
"access_token}) sys.exit(0) if options.access_token: access_token = options.access_token elif 'access_token' in config: access_token =",
"bitgo = BitGo(access_token=access_token) if action == 'get_wallets': print bitgo.get_wallets() elif action == 'get_balance':",
"print bitgo.get_wallets() elif action == 'get_balance': if options.wallet_id is None: print \"option -w",
"config_filename = os.path.join(home, \".bitgo\") config = load_config(config_filename) if action == 'access_token': username =",
"= bitgo.get_access_token(username, password, otp) print \"access_token: \", access_token if raw_input(\"store y/n? \") ==",
"sys.exit(1) otp = getpass.getpass('otp: ') bitgo.unlock(otp) print bitgo.get_wallet(options.wallet_id) elif action == 'get_unspents': if",
"else: print \"the --access-token is a required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if",
"bitgo.get_access_token(username, password, otp) print \"access_token: \", access_token if raw_input(\"store y/n? \") == \"y\":",
"== 'get_wallet': if options.wallet_id is None: print \"option -w {{ wallet_id }} is",
"options.wallet_id is None: print \"option -w {{ wallet_id }} is required for get_wallet\"",
"float(10**8) elif action == 'get_wallet': if options.wallet_id is None: print \"option -w {{",
"load_config(filename): if os.path.exists(filename): try: return json.load(open(filename, 'rb')) except ValueError: return {} else: return",
"wallet_id }} is required for get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id) / float(10**8) elif action",
"if raw_input(\"store y/n? \") == \"y\": update_config(config_filename, {'access_token': access_token}) sys.exit(0) if options.access_token: access_token",
"from .bitgo import BitGo def load_config(filename): if os.path.exists(filename): try: return json.load(open(filename, 'rb')) except",
"config: access_token = config['access_token'] else: print \"the --access-token is a required parameter\" sys.exit(1)",
"') bitgo.unlock(otp) print bitgo.get_wallet(options.wallet_id) elif action == 'get_unspents': if options.wallet_id is None: print",
"'send': if options.wallet_id is None: print \"option -w {{ wallet_id }} is required",
"/ float(10**8) elif action == 'get_wallet': if options.wallet_id is None: print \"option -w",
"config.update(cnf) json.dump(config, open(filename, \"wb\")) def main(): import getpass from optparse import OptionParser import",
"main(): import getpass from optparse import OptionParser import sys from os.path import expanduser",
"parser.parse_args() if len(args) == 0: print \"a command is required, available: access_token, get_wallets,",
"if os.path.exists(filename): try: return json.load(open(filename, 'rb')) except ValueError: return {} else: return {}",
"token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\") (options, args) = parser.parse_args() if len(args) ==",
"\"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\") (options, args) = parser.parse_args()",
"y/n? \") == \"y\": update_config(config_filename, {'access_token': access_token}) sys.exit(0) if options.access_token: access_token = options.access_token",
"elif action == 'get_wallet': if options.wallet_id is None: print \"option -w {{ wallet_id",
"sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif action == 'send': if options.wallet_id is None: print \"option",
"}} is required for get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id) / float(10**8) elif action ==",
"-w {{ wallet_id }} is required for send\" sys.exit(1) if len(args) != 3:",
"access_token, get_wallets, get_balance, send\" sys.exit(1) action = args[0] config_filename = os.path.join(home, \".bitgo\") config",
"action == 'get_balance': if options.wallet_id is None: print \"option -w {{ wallet_id }}",
"otp = raw_input('otp: ') passcode = getpass.getpass('passcode: ') bitgo.unlock(otp) print bitgo.send(options.wallet_id, passcode, args[1],",
"'get_wallet': if options.wallet_id is None: print \"option -w {{ wallet_id }} is required",
"sys.exit(1) otp = raw_input('otp: ') passcode = getpass.getpass('passcode: ') bitgo.unlock(otp) print bitgo.send(options.wallet_id, passcode,",
"__author__ = 'sserrano' import json import os from .bitgo import BitGo def load_config(filename):",
"action == 'access_token': username = raw_input('username: ') password = <PASSWORD>('password: ') otp =",
"options.wallet_id is None: print \"option -w {{ wallet_id }} is required for get_unspents\"",
"-w {{ wallet_id }} is required for get_wallet\" sys.exit(1) otp = getpass.getpass('otp: ')",
"print \"the --access-token is a required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if action",
"os.path.exists(filename): try: return json.load(open(filename, 'rb')) except ValueError: return {} else: return {} def",
"is required for get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif action == 'send': if options.wallet_id",
"getpass.getpass('passcode: ') bitgo.unlock(otp) print bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8) else: print \"invalid",
"\".bitgo\") config = load_config(config_filename) if action == 'access_token': username = raw_input('username: ') password",
"required for get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif action == 'send': if options.wallet_id is",
"and amount are required\" sys.exit(1) otp = raw_input('otp: ') passcode = getpass.getpass('passcode: ')",
"return json.load(open(filename, 'rb')) except ValueError: return {} else: return {} def update_config(filename, cnf):",
"options.access_token elif 'access_token' in config: access_token = config['access_token'] else: print \"the --access-token is",
"sys.exit(1) if len(args) != 3: print \"address and amount are required\" sys.exit(1) otp",
"is None: print \"option -w {{ wallet_id }} is required for get_balance\" sys.exit(1)",
"= expanduser(\"~\") parser = OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\",",
"expanduser home = expanduser(\"~\") parser = OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\",",
"args) = parser.parse_args() if len(args) == 0: print \"a command is required, available:",
"if len(args) != 3: print \"address and amount are required\" sys.exit(1) otp =",
"\"option -w {{ wallet_id }} is required for get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id) /",
"action == 'send': if options.wallet_id is None: print \"option -w {{ wallet_id }}",
"config = load_config(config_filename) if action == 'access_token': username = raw_input('username: ') password =",
"') password = <PASSWORD>('password: ') otp = raw_input('otp: ') bitgo = BitGo() access_token",
"load_config(config_filename) if action == 'access_token': username = raw_input('username: ') password = <PASSWORD>('password: ')",
"print bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8) else: print \"invalid command\" if __name__",
"raw_input(\"store y/n? \") == \"y\": update_config(config_filename, {'access_token': access_token}) sys.exit(0) if options.access_token: access_token =",
"os.path import expanduser home = expanduser(\"~\") parser = OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access",
"print \"address and amount are required\" sys.exit(1) otp = raw_input('otp: ') passcode =",
"OptionParser import sys from os.path import expanduser home = expanduser(\"~\") parser = OptionParser()",
"'get_unspents': if options.wallet_id is None: print \"option -w {{ wallet_id }} is required",
"raw_input('otp: ') bitgo = BitGo() access_token = bitgo.get_access_token(username, password, otp) print \"access_token: \",",
"= getpass.getpass('passcode: ') bitgo.unlock(otp) print bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8) else: print",
"dest=\"wallet_id\", help=\"wallet id\") (options, args) = parser.parse_args() if len(args) == 0: print \"a",
"bitgo = BitGo() access_token = bitgo.get_access_token(username, password, otp) print \"access_token: \", access_token if",
"required, available: access_token, get_wallets, get_balance, send\" sys.exit(1) action = args[0] config_filename = os.path.join(home,",
"os.path.join(home, \".bitgo\") config = load_config(config_filename) if action == 'access_token': username = raw_input('username: ')",
"dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\") (options, args) = parser.parse_args() if",
"<PASSWORD>('password: ') otp = raw_input('otp: ') bitgo = BitGo() access_token = bitgo.get_access_token(username, password,",
"}} is required for get_wallet\" sys.exit(1) otp = getpass.getpass('otp: ') bitgo.unlock(otp) print bitgo.get_wallet(options.wallet_id)",
"if options.wallet_id is None: print \"option -w {{ wallet_id }} is required for",
"options.wallet_id is None: print \"option -w {{ wallet_id }} is required for send\"",
"sys.exit(1) bitgo = BitGo(access_token=access_token) if action == 'get_wallets': print bitgo.get_wallets() elif action ==",
"home = expanduser(\"~\") parser = OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\",",
"bitgo.unlock(otp) print bitgo.get_wallet(options.wallet_id) elif action == 'get_unspents': if options.wallet_id is None: print \"option",
"= OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\") (options,",
"is None: print \"option -w {{ wallet_id }} is required for get_wallet\" sys.exit(1)",
"for get_wallet\" sys.exit(1) otp = getpass.getpass('otp: ') bitgo.unlock(otp) print bitgo.get_wallet(options.wallet_id) elif action ==",
"os from .bitgo import BitGo def load_config(filename): if os.path.exists(filename): try: return json.load(open(filename, 'rb'))",
"(options, args) = parser.parse_args() if len(args) == 0: print \"a command is required,",
"from optparse import OptionParser import sys from os.path import expanduser home = expanduser(\"~\")",
"required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if action == 'get_wallets': print bitgo.get_wallets() elif",
"== 'get_wallets': print bitgo.get_wallets() elif action == 'get_balance': if options.wallet_id is None: print",
"access_token = config['access_token'] else: print \"the --access-token is a required parameter\" sys.exit(1) bitgo",
"= <PASSWORD>('password: ') otp = raw_input('otp: ') bitgo = BitGo() access_token = bitgo.get_access_token(username,",
"None: print \"option -w {{ wallet_id }} is required for get_unspents\" sys.exit(1) print",
"!= 3: print \"address and amount are required\" sys.exit(1) otp = raw_input('otp: ')",
"import expanduser home = expanduser(\"~\") parser = OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\")",
"update_config(config_filename, {'access_token': access_token}) sys.exit(0) if options.access_token: access_token = options.access_token elif 'access_token' in config:",
"bitgo.get_wallet(options.wallet_id) elif action == 'get_unspents': if options.wallet_id is None: print \"option -w {{",
"= os.path.join(home, \".bitgo\") config = load_config(config_filename) if action == 'access_token': username = raw_input('username:",
"elif action == 'get_unspents': if options.wallet_id is None: print \"option -w {{ wallet_id",
"is None: print \"option -w {{ wallet_id }} is required for send\" sys.exit(1)",
"parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\") (options, args) =",
"is required, available: access_token, get_wallets, get_balance, send\" sys.exit(1) action = args[0] config_filename =",
"import os from .bitgo import BitGo def load_config(filename): if os.path.exists(filename): try: return json.load(open(filename,",
"if action == 'access_token': username = raw_input('username: ') password = <PASSWORD>('password: ') otp",
"password, otp) print \"access_token: \", access_token if raw_input(\"store y/n? \") == \"y\": update_config(config_filename,",
"'get_balance': if options.wallet_id is None: print \"option -w {{ wallet_id }} is required",
"import json import os from .bitgo import BitGo def load_config(filename): if os.path.exists(filename): try:",
"elif 'access_token' in config: access_token = config['access_token'] else: print \"the --access-token is a",
"') otp = raw_input('otp: ') bitgo = BitGo() access_token = bitgo.get_access_token(username, password, otp)",
"\", access_token if raw_input(\"store y/n? \") == \"y\": update_config(config_filename, {'access_token': access_token}) sys.exit(0) if",
"bitgo.get_balance(options.wallet_id) / float(10**8) elif action == 'get_wallet': if options.wallet_id is None: print \"option",
"action = args[0] config_filename = os.path.join(home, \".bitgo\") config = load_config(config_filename) if action ==",
"json.dump(config, open(filename, \"wb\")) def main(): import getpass from optparse import OptionParser import sys",
"if len(args) == 0: print \"a command is required, available: access_token, get_wallets, get_balance,",
"= load_config(config_filename) if action == 'access_token': username = raw_input('username: ') password = <PASSWORD>('password:",
"access_token = bitgo.get_access_token(username, password, otp) print \"access_token: \", access_token if raw_input(\"store y/n? \")",
"get_balance, send\" sys.exit(1) action = args[0] config_filename = os.path.join(home, \".bitgo\") config = load_config(config_filename)",
"options.wallet_id is None: print \"option -w {{ wallet_id }} is required for get_balance\"",
"is None: print \"option -w {{ wallet_id }} is required for get_unspents\" sys.exit(1)",
"sys.exit(0) if options.access_token: access_token = options.access_token elif 'access_token' in config: access_token = config['access_token']",
"required\" sys.exit(1) otp = raw_input('otp: ') passcode = getpass.getpass('passcode: ') bitgo.unlock(otp) print bitgo.send(options.wallet_id,",
"ValueError: return {} else: return {} def update_config(filename, cnf): config = load_config(filename) config.update(cnf)",
"print \"option -w {{ wallet_id }} is required for get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id)",
"len(args) != 3: print \"address and amount are required\" sys.exit(1) otp = raw_input('otp:",
"is required for send\" sys.exit(1) if len(args) != 3: print \"address and amount",
"else: return {} def update_config(filename, cnf): config = load_config(filename) config.update(cnf) json.dump(config, open(filename, \"wb\"))",
"command is required, available: access_token, get_wallets, get_balance, send\" sys.exit(1) action = args[0] config_filename",
"print \"option -w {{ wallet_id }} is required for get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id)",
"{{ wallet_id }} is required for get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif action ==",
"username = raw_input('username: ') password = <PASSWORD>('password: ') otp = raw_input('otp: ') bitgo",
"{{ wallet_id }} is required for send\" sys.exit(1) if len(args) != 3: print",
"getpass from optparse import OptionParser import sys from os.path import expanduser home =",
"args[0] config_filename = os.path.join(home, \".bitgo\") config = load_config(config_filename) if action == 'access_token': username",
"json import os from .bitgo import BitGo def load_config(filename): if os.path.exists(filename): try: return",
"import BitGo def load_config(filename): if os.path.exists(filename): try: return json.load(open(filename, 'rb')) except ValueError: return",
"-w {{ wallet_id }} is required for get_unspents\" sys.exit(1) print bitgo.get_unspents(options.wallet_id) elif action",
"bitgo.send(options.wallet_id, passcode, args[1], float(args[2]) * 10**8) else: print \"invalid command\" if __name__ ==",
"action == 'get_unspents': if options.wallet_id is None: print \"option -w {{ wallet_id }}",
"= args[0] config_filename = os.path.join(home, \".bitgo\") config = load_config(config_filename) if action == 'access_token':",
"if action == 'get_wallets': print bitgo.get_wallets() elif action == 'get_balance': if options.wallet_id is",
"get_wallets, get_balance, send\" sys.exit(1) action = args[0] config_filename = os.path.join(home, \".bitgo\") config =",
"None: print \"option -w {{ wallet_id }} is required for send\" sys.exit(1) if",
"\"wb\")) def main(): import getpass from optparse import OptionParser import sys from os.path",
"send\" sys.exit(1) action = args[0] config_filename = os.path.join(home, \".bitgo\") config = load_config(config_filename) if",
"print \"a command is required, available: access_token, get_wallets, get_balance, send\" sys.exit(1) action =",
"is required for get_balance\" sys.exit(1) print bitgo.get_balance(options.wallet_id) / float(10**8) elif action == 'get_wallet':",
"load_config(filename) config.update(cnf) json.dump(config, open(filename, \"wb\")) def main(): import getpass from optparse import OptionParser",
"'access_token': username = raw_input('username: ') password = <PASSWORD>('password: ') otp = raw_input('otp: ')",
"= options.access_token elif 'access_token' in config: access_token = config['access_token'] else: print \"the --access-token",
"OptionParser() parser.add_option(\"-a\", \"--access-token\", dest=\"access_token\", help=\"access token\") parser.add_option(\"-w\", \"--wallet-id\", dest=\"wallet_id\", help=\"wallet id\") (options, args)",
"== 'access_token': username = raw_input('username: ') password = <PASSWORD>('password: ') otp = raw_input('otp:",
"<filename>bitgo/cmd.py<gh_stars>1-10 __author__ = 'sserrano' import json import os from .bitgo import BitGo def",
"3: print \"address and amount are required\" sys.exit(1) otp = raw_input('otp: ') passcode",
"--access-token is a required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if action == 'get_wallets':",
"\"the --access-token is a required parameter\" sys.exit(1) bitgo = BitGo(access_token=access_token) if action ==",
"0: print \"a command is required, available: access_token, get_wallets, get_balance, send\" sys.exit(1) action"
] |
[
"import_input(path): with open(path, encoding='utf-8') as infile: return [int(line) for line in infile] instructions",
"class Jumper: def __init__(self, instructions): self.instructions = instructions def solve(self): steps = 0",
"import eq as isequal def import_input(path): with open(path, encoding='utf-8') as infile: return [int(line)",
"encoding='utf-8') as infile: return [int(line) for line in infile] instructions = import_input(\"input.txt\") class",
"[int(line) for line in infile] instructions = import_input(\"input.txt\") class Jumper: def __init__(self, instructions):",
"self.instructions[i] += 1 i += jump steps += 1 return steps jump =",
"while 0 <= i < len(self.instructions): jump = self.instructions[i] if self.instructions[i] >= 3:",
"+= 1 i += jump steps += 1 return steps jump = Jumper(instructions)",
"operator import eq as isequal def import_input(path): with open(path, encoding='utf-8') as infile: return",
"infile: return [int(line) for line in infile] instructions = import_input(\"input.txt\") class Jumper: def",
"i < len(self.instructions): jump = self.instructions[i] if self.instructions[i] >= 3: self.instructions[i] -= 1",
"def solve(self): steps = 0 i = 0 while 0 <= i <",
"1 else: self.instructions[i] += 1 i += jump steps += 1 return steps",
"= import_input(\"input.txt\") class Jumper: def __init__(self, instructions): self.instructions = instructions def solve(self): steps",
"#!/usr/bin/env python3 from operator import eq as isequal def import_input(path): with open(path, encoding='utf-8')",
"isequal def import_input(path): with open(path, encoding='utf-8') as infile: return [int(line) for line in",
"self.instructions[i] >= 3: self.instructions[i] -= 1 else: self.instructions[i] += 1 i += jump",
"eq as isequal def import_input(path): with open(path, encoding='utf-8') as infile: return [int(line) for",
"self.instructions = instructions def solve(self): steps = 0 i = 0 while 0",
">= 3: self.instructions[i] -= 1 else: self.instructions[i] += 1 i += jump steps",
"line in infile] instructions = import_input(\"input.txt\") class Jumper: def __init__(self, instructions): self.instructions =",
"instructions def solve(self): steps = 0 i = 0 while 0 <= i",
"instructions): self.instructions = instructions def solve(self): steps = 0 i = 0 while",
"-= 1 else: self.instructions[i] += 1 i += jump steps += 1 return",
"self.instructions[i] if self.instructions[i] >= 3: self.instructions[i] -= 1 else: self.instructions[i] += 1 i",
"jump = self.instructions[i] if self.instructions[i] >= 3: self.instructions[i] -= 1 else: self.instructions[i] +=",
"as infile: return [int(line) for line in infile] instructions = import_input(\"input.txt\") class Jumper:",
"as isequal def import_input(path): with open(path, encoding='utf-8') as infile: return [int(line) for line",
"= self.instructions[i] if self.instructions[i] >= 3: self.instructions[i] -= 1 else: self.instructions[i] += 1",
"instructions = import_input(\"input.txt\") class Jumper: def __init__(self, instructions): self.instructions = instructions def solve(self):",
"0 while 0 <= i < len(self.instructions): jump = self.instructions[i] if self.instructions[i] >=",
"= 0 while 0 <= i < len(self.instructions): jump = self.instructions[i] if self.instructions[i]",
"0 i = 0 while 0 <= i < len(self.instructions): jump = self.instructions[i]",
"0 <= i < len(self.instructions): jump = self.instructions[i] if self.instructions[i] >= 3: self.instructions[i]",
"else: self.instructions[i] += 1 i += jump steps += 1 return steps jump",
"3: self.instructions[i] -= 1 else: self.instructions[i] += 1 i += jump steps +=",
"= 0 i = 0 while 0 <= i < len(self.instructions): jump =",
"open(path, encoding='utf-8') as infile: return [int(line) for line in infile] instructions = import_input(\"input.txt\")",
"__init__(self, instructions): self.instructions = instructions def solve(self): steps = 0 i = 0",
"solve(self): steps = 0 i = 0 while 0 <= i < len(self.instructions):",
"<= i < len(self.instructions): jump = self.instructions[i] if self.instructions[i] >= 3: self.instructions[i] -=",
"import_input(\"input.txt\") class Jumper: def __init__(self, instructions): self.instructions = instructions def solve(self): steps =",
"for line in infile] instructions = import_input(\"input.txt\") class Jumper: def __init__(self, instructions): self.instructions",
"1 i += jump steps += 1 return steps jump = Jumper(instructions) print(jump.solve())",
"if self.instructions[i] >= 3: self.instructions[i] -= 1 else: self.instructions[i] += 1 i +=",
"self.instructions[i] -= 1 else: self.instructions[i] += 1 i += jump steps += 1",
"infile] instructions = import_input(\"input.txt\") class Jumper: def __init__(self, instructions): self.instructions = instructions def",
"i = 0 while 0 <= i < len(self.instructions): jump = self.instructions[i] if",
"from operator import eq as isequal def import_input(path): with open(path, encoding='utf-8') as infile:",
"steps = 0 i = 0 while 0 <= i < len(self.instructions): jump",
"def import_input(path): with open(path, encoding='utf-8') as infile: return [int(line) for line in infile]",
"= instructions def solve(self): steps = 0 i = 0 while 0 <=",
"def __init__(self, instructions): self.instructions = instructions def solve(self): steps = 0 i =",
"python3 from operator import eq as isequal def import_input(path): with open(path, encoding='utf-8') as",
"< len(self.instructions): jump = self.instructions[i] if self.instructions[i] >= 3: self.instructions[i] -= 1 else:",
"in infile] instructions = import_input(\"input.txt\") class Jumper: def __init__(self, instructions): self.instructions = instructions",
"Jumper: def __init__(self, instructions): self.instructions = instructions def solve(self): steps = 0 i",
"len(self.instructions): jump = self.instructions[i] if self.instructions[i] >= 3: self.instructions[i] -= 1 else: self.instructions[i]",
"with open(path, encoding='utf-8') as infile: return [int(line) for line in infile] instructions =",
"return [int(line) for line in infile] instructions = import_input(\"input.txt\") class Jumper: def __init__(self,"
] |
[
"usage: ```python3 for image in images: print(image['url'], image['filename']) ``` If images need to",
"self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total for index, image in enumerate(self.images, start=1): image['filename']",
"image in enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self,",
"only if the domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return",
"\"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/ to /a/ in url. \"\"\"",
"return re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\" Check if the supplied link points",
"Get the number of images from the images attribute. \"\"\" return len(self.images) def",
"print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build list of direct links to images. Input",
"order when downloaded imgur.numerate_images() images = imgur.images ``` Note: For up to date",
"class and it obtains list of direct image urls that could be used",
"ends with an extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) )",
"url): \"\"\" Get image file name from its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg",
"'/a/') def turn_into_grid(self): \"\"\" Append ?grid to url. \"\"\" if self.is_it_album(): if not",
"total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total for index, image in enumerate(self.images,",
"return not self.is_it_album() def is_it_album(self): \"\"\" Check if the url points to an",
"in order they appear in an album, their filenames have to be numerated.",
"\"\"\" if '.gifv' in url: return True return False def is_it_image(self): \"\"\" Check",
"return None def get_image_filename(self, url): \"\"\" Get image file name from its url.",
"if supplied link is invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\" Contains extensions for",
"filename in filenames: if filename not in clean: clean.append(filename) return clean def contains_extension(self,",
"if the supplied link points to .gifv page. \"\"\" if '.gifv' in url:",
"``` imgur.images is a deque of two keyed dictionaries. Example usage: ```python3 for",
"Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/ to /a/ in",
"that could be used to download images. Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ')",
"numerate_images(self): \"\"\" Append ordinal number to image filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal",
"\"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise ImgurException('Invalid link.')",
"return ('/a/' in self.url) or ('/gallery/' in self.url) def is_it_grid(self): \"\"\" Check if",
"/gallery/ to /a/ in url. \"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append",
"import HTTPError from urllib.error import URLError __version__ = 'v0.2' __status__ = 'Development' class",
"self.get_image_filename(url)) ) except HTTPError as e: print(e.status) except URLError as e: print(e.reason) def",
"the link already ends with an extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append(",
"\"\"\" Check if the image url contains extension. If there is an extension",
"class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections import deque from urllib.request import",
"Check if the supplied link points to .gifv page. \"\"\" if '.gifv' in",
"#WEBM = '.webm' #MP4 = '.mp4' @classmethod def formats(cls): \"\"\" Return a set",
"urls = self.build_image_url_list(filenames_clean) for url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError",
"!= '__': value = getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value) return formats class",
"to .gifv page. \"\"\" if '.gifv' in url: return True return False def",
"True return False def is_it_image(self): \"\"\" Check if the url points to image.",
"def is_it_image(self): \"\"\" Check if the url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash]",
"to image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates =",
"of direct links to images. Input filenames list is a list of tuples",
"extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def remove_duplicates(self, filenames): \"\"\" Remove",
"enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self, number): \"\"\"",
"-> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension = self.contains_extension(url) pattern = ''.join(['.+\\\\', extension])",
"exception if the link already ends with an extension. \"\"\" if self.is_it_image(): if",
"def parse_and_prepare_images(self, url): \"\"\" Obtain and parse html, and append image dictionaries to",
"https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension = self.contains_extension(url) pattern = ''.join(['.+\\\\',",
"\"\"\" This exception is raised if supplied link is invalid. \"\"\" pass class",
"ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self, number): \"\"\" Return how many digits are",
"\"\"\" Remove duplicates from a list of tuples containing filenames with extensions. \"\"\"",
"callable(value): formats.add(value) return formats class Imgur(object): \"\"\" Imgur contains all necessary methods to",
"def contains_extension(self, url): \"\"\" Check if the image url contains extension. If there",
"it obtains list of direct image urls that could be used to download",
"or ('/gallery/' in self.url) def is_it_grid(self): \"\"\" Check if the url points to",
"https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self): \"\"\" Check if the url points to",
"print(e.status) except URLError as e: print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build list of",
"Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self): \"\"\"",
"attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value = getattr(ImgurFileFormats, attribute) if not",
"e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls",
"in url. \"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append ?grid to url.",
"url points to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url)",
"for extension in ImgurFileFormats.formats(): if extension in url: return extension return None def",
"to date version of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections",
"of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg']",
"``` Note: For up to date version of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\"",
"def numerate_images(self): \"\"\" Append ordinal number to image filename. \"\"\" total = self.digits_in_a_number(len(self.images))",
"set consisting of all class attributes. Class attributes must not be callable. \"\"\"",
"image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self):",
"corresponding filename. \"\"\" return {'url' : url, 'filename' : filename} def number_of_images(self): \"\"\"",
"If not, raise ImgurException. This method checks only if the domain is valid.",
"append image dictionaries to image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html =",
"-> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension = self.contains_extension(url) pattern",
"raised if supplied link is invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\" Contains extensions",
"from a list of tuples containing filenames with extensions. \"\"\" clean = []",
"that are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG =",
"('lciC5G8', '.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for",
"Remove duplicates from a list of tuples containing filenames with extensions. \"\"\" clean",
"'.gif' APNG = '.apng' TIFF = '.tiff' PDF = '.pdf' XCF = '.xcf'",
"url).group(0) def is_it_gifv(self, url): \"\"\" Check if the supplied link points to .gifv",
"def number_of_images(self): \"\"\" Get the number of images from the images attribute. \"\"\"",
"must not be callable. \"\"\" formats = set() for attribute in ImgurFileFormats.__dict__.keys(): if",
"raise ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\" Remove 'v' from .gifv \"\"\" pattern",
"they appear in an album, their filenames have to be numerated. Full examples:",
"#!/usr/bin/python3 \"\"\" This module contains classes for parsing the imgur.com site. It consists",
"view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/ to /a/",
"remove_duplicates(self, filenames): \"\"\" Remove duplicates from a list of tuples containing filenames with",
"'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\" Check if the supplied link",
"\"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return",
"a deque of two keyed dictionaries. Example usage: ```python3 for image in images:",
"\"\"\" Append ?grid to url. \"\"\" if self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(),",
": filename} def number_of_images(self): \"\"\" Get the number of images from the images",
"def change_gallery(self): \"\"\" Change /gallery/ to /a/ in url. \"\"\" return self.url.replace('/gallery/', '/a/')",
"be used to download images. Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images",
"attribute) if not callable(value): formats.add(value) return formats class Imgur(object): \"\"\" Imgur contains all",
"list of direct links to images. Input filenames list is a list of",
"jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension = self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return",
"= '.mp4' @classmethod def formats(cls): \"\"\" Return a set consisting of all class",
"object. \"\"\" self.url = self.sanitize(url) self.images = deque() def sanitize(self, url): \"\"\" Check",
"dictionaries to image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates",
"\"\"\" Build list of direct links to images. Input filenames list is a",
"Imgur Imgur is the main class and it obtains list of direct image",
"an extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else:",
"= urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for",
"url. \"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append ?grid to url. \"\"\"",
"urls def remove_duplicates(self, filenames): \"\"\" Remove duplicates from a list of tuples containing",
"\"\"\" pass class ImgurFileFormats(object): \"\"\" Contains extensions for file formats that are allowed",
"points to .gifv page. \"\"\" if '.gifv' in url: return True return False",
"Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url) or ('/gallery/' in self.url) def",
"images: print(image['url'], image['filename']) ``` If images need to be downloaded in order they",
"be numerated. Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are",
"\"\"\" self.url = self.sanitize(url) self.images = deque() def sanitize(self, url): \"\"\" Check if",
"url): \"\"\" Check if the image url contains extension. If there is an",
"urllib.error import URLError __version__ = 'v0.2' __status__ = 'Development' class ImgurException(Exception): \"\"\" This",
"= Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ``` imgur.images is a deque of two",
"self.is_it_album() def is_it_album(self): \"\"\" Check if the url points to an album. Examples:",
"def is_it_gifv(self, url): \"\"\" Check if the supplied link points to .gifv page.",
"urls = [] for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls",
"url. \"\"\" if self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return self.url",
"return len(self.images) def numerate_images(self): \"\"\" Append ordinal number to image filename. \"\"\" total",
"self.images = deque() def sanitize(self, url): \"\"\" Check if the supplied link is",
"= self.build_image_url_list(filenames_clean) for url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as",
"is invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\" Contains extensions for file formats that",
"class Imgur(object): \"\"\" Imgur contains all necessary methods to extract image or album",
"Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are not guaranteed",
"Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ``` imgur.images is a deque of two keyed",
"in enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self, number):",
"in images: print(image['url'], image['filename']) ``` If images need to be downloaded in order",
"[] for filename in filenames: if filename not in clean: clean.append(filename) return clean",
"need to be downloaded in order they appear in an album, their filenames",
"= 'v0.2' __status__ = 'Development' class ImgurException(Exception): \"\"\" This exception is raised if",
"= self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) )",
"pattern = ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def pack_image(self, url, filename): \"\"\" Returns",
"image['filename'] = ''.join([ ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self, number): \"\"\" Return how",
"\"\"\" Change /gallery/ to /a/ in url. \"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self):",
"~ ImgurFileFormats ~ Imgur Imgur is the main class and it obtains list",
"return True return False def is_it_image(self): \"\"\" Check if the url points to",
"```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are not guaranteed to appear",
"from collections import deque from urllib.request import urlopen from urllib.error import HTTPError from",
"~ Imgur Imgur is the main class and it obtains list of direct",
"total for index, image in enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index), '-', image['filename']",
"for file formats that are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload-",
"'.jpeg' PNG = '.png' GIF = '.gif' APNG = '.apng' TIFF = '.tiff'",
"for url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as e: print(e.status)",
"get_image_filename(self, url): \"\"\" Get image file name from its url. Examples: https://i.imgur.com/jedEzFL.jpg ->",
"''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def pack_image(self, url, filename): \"\"\" Returns a dictionary",
"Note: For up to date version of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import",
"is returned. Otherwise, None is returned. \"\"\" for extension in ImgurFileFormats.formats(): if extension",
"parse html, and append image dictionaries to image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}'",
"Imgur is the main class and it obtains list of direct image urls",
"prepare_images(self): \"\"\" Parses HTML from the provided url to obtain link(s) to image(s).",
"like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/',",
"filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def remove_duplicates(self, filenames): \"\"\"",
"[] for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def remove_duplicates(self,",
"downloaded imgur.numerate_images() images = imgur.images ``` Note: For up to date version of",
"Initiate Imgur object. \"\"\" self.url = self.sanitize(url) self.images = deque() def sanitize(self, url):",
"https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections import deque from urllib.request import urlopen from",
"url points to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self):",
"attribute. \"\"\" return len(self.images) def numerate_images(self): \"\"\" Append ordinal number to image filename.",
"url and corresponding filename. \"\"\" return {'url' : url, 'filename' : filename} def",
"all necessary methods to extract image or album images from imgur link. \"\"\"",
"if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise ImgurException('Invalid link.') def",
"with an extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return",
"date version of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections import",
"if self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return self.url raise ImgurException('Cannot",
"album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url) or ('/gallery/' in self.url)",
"ordinal number to image filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' %",
"looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for filename, extension in filenames:",
"image urls that could be used to download images. Example usage: ```python3 imgur",
"filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def remove_duplicates(self, filenames): \"\"\" Remove duplicates from",
"attributes. Class attributes must not be callable. \"\"\" formats = set() for attribute",
"This module contains classes for parsing the imgur.com site. It consists of three",
"in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def remove_duplicates(self, filenames): \"\"\" Remove duplicates",
"from the provided url to obtain link(s) to image(s). Raises exception if the",
"Imgur contains all necessary methods to extract image or album images from imgur",
"url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension",
"image dictionaries to image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8')",
"in an album, their filenames have to be numerated. Full examples: ```python3 imgur",
"from its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate =",
"imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are not guaranteed to appear in",
"html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean)",
"self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return self.url raise ImgurException('Cannot convert",
"clean def contains_extension(self, url): \"\"\" Check if the image url contains extension. If",
"ImgurException. This method checks only if the domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/',",
"to url. \"\"\" if self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return",
"\"\"\" Obtain and parse html, and append image dictionaries to image deque. \"\"\"",
"from urllib.error import URLError __version__ = 'v0.2' __status__ = 'Development' class ImgurException(Exception): \"\"\"",
"in url: return True return False def is_it_image(self): \"\"\" Check if the url",
"of tuples containing filenames with extensions. \"\"\" clean = [] for filename in",
"pack_image(self, url, filename): \"\"\" Returns a dictionary with image url and corresponding filename.",
"sanitize_gifv(self, url): \"\"\" Remove 'v' from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern,",
"Append ordinal number to image filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}'",
"def is_it_grid(self): \"\"\" Check if the url points to a grid view. Example:",
"return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\" Obtain and parse",
"'\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls",
"consisting of all class attributes. Class attributes must not be callable. \"\"\" formats",
"used to download images. Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images =",
"filename): \"\"\" Returns a dictionary with image url and corresponding filename. \"\"\" return",
"contains classes for parsing the imgur.com site. It consists of three classes: ~",
"url.split('/')[-1] extension = self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def pack_image(self,",
"imgur.numerate_images() images = imgur.images ``` Note: For up to date version of this",
"invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\" Contains extensions for file formats that are",
"return url raise ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\" Remove 'v' from .gifv",
"allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG",
"self.url) def is_it_grid(self): \"\"\" Check if the url points to a grid view.",
"image into album grid.') def prepare_images(self): \"\"\" Parses HTML from the provided url",
"extension = self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def pack_image(self, url,",
"imgur link. \"\"\" def __init__(self, url): \"\"\" Initiate Imgur object. \"\"\" self.url =",
"tuples containing filenames with extensions. \"\"\" clean = [] for filename in filenames:",
"return def parse_and_prepare_images(self, url): \"\"\" Obtain and parse html, and append image dictionaries",
"if the url points to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/'",
"http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG = '.jpeg' PNG = '.png' GIF",
"clean = [] for filename in filenames: if filename not in clean: clean.append(filename)",
"```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ``` imgur.images is a deque",
"url): \"\"\" Remove 'v' from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0)",
"@classmethod def formats(cls): \"\"\" Return a set consisting of all class attributes. Class",
"supplied link points to .gifv page. \"\"\" if '.gifv' in url: return True",
"digits_in_a_number(self, number): \"\"\" Return how many digits are there in a number. \"\"\"",
"download images. Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ```",
"Example usage: ```python3 for image in images: print(image['url'], image['filename']) ``` If images need",
"filenames): \"\"\" Build list of direct links to images. Input filenames list is",
"images from imgur link. \"\"\" def __init__(self, url): \"\"\" Initiate Imgur object. \"\"\"",
"self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return",
"for filename in filenames: if filename not in clean: clean.append(filename) return clean def",
"\"\"\" Check if the supplied link points to .gifv page. \"\"\" if '.gifv'",
"% total for index, image in enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index), '-',",
"parsing the imgur.com site. It consists of three classes: ~ ImgurException ~ ImgurFileFormats",
"return ''.join([self.change_gallery(), '?grid']) else: return self.url raise ImgurException('Cannot convert single image into album",
"APNG = '.apng' TIFF = '.tiff' PDF = '.pdf' XCF = '.xcf' #WEBM",
"not, raise ImgurException. This method checks only if the domain is valid. \"\"\"",
"if the domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url)",
"return extension return None def get_image_filename(self, url): \"\"\" Get image file name from",
"jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension = self.contains_extension(url) pattern =",
"version of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections import deque",
"image url contains extension. If there is an extension it is returned. Otherwise,",
"'.gifv' in url: return True return False def is_it_image(self): \"\"\" Check if the",
"turn_into_grid(self): \"\"\" Append ?grid to url. \"\"\" if self.is_it_album(): if not self.is_it_grid(): return",
"'__': value = getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value) return formats class Imgur(object):",
"there is an extension it is returned. Otherwise, None is returned. \"\"\" for",
"filename not in clean: clean.append(filename) return clean def contains_extension(self, url): \"\"\" Check if",
"if the image url contains extension. If there is an extension it is",
"method checks only if the domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if",
"Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are not guaranteed to appear in order when",
"/a/ in url. \"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append ?grid to",
"from urllib.error import HTTPError from urllib.error import URLError __version__ = 'v0.2' __status__ =",
"from the images attribute. \"\"\" return len(self.images) def numerate_images(self): \"\"\" Append ordinal number",
"to images. Input filenames list is a list of tuples e.g. [('jedEzFL', '.jpg'),",
"re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url in urls: self.images.append(",
"for parsing the imgur.com site. It consists of three classes: ~ ImgurException ~",
"\"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self): \"\"\" Check if the url",
"http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url) or ('/gallery/' in self.url) def is_it_grid(self):",
"except URLError as e: print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build list of direct",
"duplicates from a list of tuples containing filenames with extensions. \"\"\" clean =",
"imgur.prepare_images() images = imgur.images ``` imgur.images is a deque of two keyed dictionaries.",
"dictionaries. Example usage: ```python3 for image in images: print(image['url'], image['filename']) ``` If images",
"'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension]))",
"\"\"\" return {'url' : url, 'filename' : filename} def number_of_images(self): \"\"\" Get the",
"have to be numerated. Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images #",
"\"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append ?grid to url. \"\"\" if",
"order they appear in an album, their filenames have to be numerated. Full",
"image['filename']) ``` If images need to be downloaded in order they appear in",
"re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\" Check if the supplied link points to",
"image or album images from imgur link. \"\"\" def __init__(self, url): \"\"\" Initiate",
"could be used to download images. Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images()",
"link(s) to image(s). Raises exception if the link already ends with an extension.",
"return formats class Imgur(object): \"\"\" Imgur contains all necessary methods to extract image",
"contains all necessary methods to extract image or album images from imgur link.",
"def pack_image(self, url, filename): \"\"\" Returns a dictionary with image url and corresponding",
"points to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url) or",
"attributes must not be callable. \"\"\" formats = set() for attribute in ImgurFileFormats.__dict__.keys():",
"from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\"",
"html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url in urls: self.images.append( self.pack_image(url,",
"url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not",
"if the supplied link is valid. If not, raise ImgurException. This method checks",
"as e: print(e.status) except URLError as e: print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build",
"its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1]",
"JPG = '.jpg' JPEG = '.jpeg' PNG = '.png' GIF = '.gif' APNG",
"grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/ to",
"getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value) return formats class Imgur(object): \"\"\" Imgur contains",
"else: return self.url raise ImgurException('Cannot convert single image into album grid.') def prepare_images(self):",
"consists of three classes: ~ ImgurException ~ ImgurFileFormats ~ Imgur Imgur is the",
"number of images from the images attribute. \"\"\" return len(self.images) def numerate_images(self): \"\"\"",
"self.url) or ('/gallery/' in self.url) def is_it_grid(self): \"\"\" Check if the url points",
"ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value = getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value)",
"number_of_images(self): \"\"\" Get the number of images from the images attribute. \"\"\" return",
"url): \"\"\" Check if the supplied link is valid. If not, raise ImgurException.",
"valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise ImgurException('Invalid",
"formats class Imgur(object): \"\"\" Imgur contains all necessary methods to extract image or",
"deque from urllib.request import urlopen from urllib.error import HTTPError from urllib.error import URLError",
"URLError as e: print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build list of direct links",
"'.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for filename,",
"url: return True return False def is_it_image(self): \"\"\" Check if the url points",
"ImgurFileFormats(object): \"\"\" Contains extensions for file formats that are allowed on imgur. Source:",
"sanitize(self, url): \"\"\" Check if the supplied link is valid. If not, raise",
"ImgurFileFormats ~ Imgur Imgur is the main class and it obtains list of",
"self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as e: print(e.status) except URLError as e: print(e.reason)",
"# These are not guaranteed to appear in order when downloaded imgur.numerate_images() images",
"HTTPError as e: print(e.status) except URLError as e: print(e.reason) def build_image_url_list(self, filenames): \"\"\"",
"not self.is_it_album() def is_it_album(self): \"\"\" Check if the url points to an album.",
"list of direct image urls that could be used to download images. Example",
"self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except",
"e: print(e.status) except URLError as e: print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build list",
"\"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\" Check if",
"to obtain link(s) to image(s). Raises exception if the link already ends with",
"pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\" Check if the",
"('/gallery/' in self.url) def is_it_grid(self): \"\"\" Check if the url points to a",
"ImgurException('Cannot convert single image into album grid.') def prepare_images(self): \"\"\" Parses HTML from",
"= '.gif' APNG = '.apng' TIFF = '.tiff' PDF = '.pdf' XCF =",
"album images from imgur link. \"\"\" def __init__(self, url): \"\"\" Initiate Imgur object.",
"Contains extensions for file formats that are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived:",
"\"\"\" This module contains classes for parsing the imgur.com site. It consists of",
"it is returned. Otherwise, None is returned. \"\"\" for extension in ImgurFileFormats.formats(): if",
"This method checks only if the domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url):",
"= '.xcf' #WEBM = '.webm' #MP4 = '.mp4' @classmethod def formats(cls): \"\"\" Return",
"extension in url: return extension return None def get_image_filename(self, url): \"\"\" Get image",
"is_it_album(self): \"\"\" Check if the url points to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash]",
"'.apng' TIFF = '.tiff' PDF = '.pdf' XCF = '.xcf' #WEBM = '.webm'",
"examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are not guaranteed to",
"images = imgur.images ``` Note: For up to date version of this class",
"to be numerated. Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These",
"url: return extension return None def get_image_filename(self, url): \"\"\" Get image file name",
"\"\"\" return len(self.images) def numerate_images(self): \"\"\" Append ordinal number to image filename. \"\"\"",
"self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return grid =",
"is_it_grid(self): \"\"\" Check if the url points to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid",
"of two keyed dictionaries. Example usage: ```python3 for image in images: print(image['url'], image['filename'])",
"\"\"\" Get the number of images from the images attribute. \"\"\" return len(self.images)",
"filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url))",
"is raised if supplied link is invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\" Contains",
"'.pdf' XCF = '.xcf' #WEBM = '.webm' #MP4 = '.mp4' @classmethod def formats(cls):",
"self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as e: print(e.status) except URLError as e:",
"self.url = self.sanitize(url) self.images = deque() def sanitize(self, url): \"\"\" Check if the",
"re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise ImgurException('Invalid link.') def sanitize_gifv(self,",
"~ ImgurException ~ ImgurFileFormats ~ Imgur Imgur is the main class and it",
"Build list of direct links to images. Input filenames list is a list",
"filename, extension])) return urls def remove_duplicates(self, filenames): \"\"\" Remove duplicates from a list",
"not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return self.url raise ImgurException('Cannot convert single image",
"three classes: ~ ImgurException ~ ImgurFileFormats ~ Imgur Imgur is the main class",
"dictionary with image url and corresponding filename. \"\"\" return {'url' : url, 'filename'",
"to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url) or ('/gallery/'",
"= url.split('/')[-1] extension = self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def",
": url, 'filename' : filename} def number_of_images(self): \"\"\" Get the number of images",
"\"\"\" Return a set consisting of all class attributes. Class attributes must not",
"set() for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value = getattr(ImgurFileFormats, attribute)",
"urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as e: print(e.status) except URLError as",
"urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def remove_duplicates(self, filenames): \"\"\" Remove duplicates from a",
"import URLError __version__ = 'v0.2' __status__ = 'Development' class ImgurException(Exception): \"\"\" This exception",
"image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern,",
"filenames with extensions. \"\"\" clean = [] for filename in filenames: if filename",
"Get image file name from its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 ->",
"in self.url) def is_it_grid(self): \"\"\" Check if the url points to a grid",
"the supplied link points to .gifv page. \"\"\" if '.gifv' in url: return",
"if not callable(value): formats.add(value) return formats class Imgur(object): \"\"\" Imgur contains all necessary",
"raise ImgurException('Cannot convert single image into album grid.') def prepare_images(self): \"\"\" Parses HTML",
"= deque() def sanitize(self, url): \"\"\" Check if the supplied link is valid.",
"be downloaded in order they appear in an album, their filenames have to",
"if '.gifv' in url: return True return False def is_it_image(self): \"\"\" Check if",
"self.url raise ImgurException('Cannot convert single image into album grid.') def prepare_images(self): \"\"\" Parses",
"link.') def sanitize_gifv(self, url): \"\"\" Remove 'v' from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif'",
"https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension = self.contains_extension(url)",
"necessary methods to extract image or album images from imgur link. \"\"\" def",
"album, their filenames have to be numerated. Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ')",
"http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/ to /a/ in url.",
"\"\"\" Check if the url points to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\"",
"extensions. \"\"\" clean = [] for filename in filenames: if filename not in",
"and parse html, and append image dictionaries to image deque. \"\"\" pattern =",
"\"\"\" if self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return self.url raise",
"link is valid. If not, raise ImgurException. This method checks only if the",
"deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html)",
"supplied link is invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\" Contains extensions for file",
"is valid. If not, raise ImgurException. This method checks only if the domain",
"grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\" Obtain and parse html,",
"imgur.images # These are not guaranteed to appear in order when downloaded imgur.numerate_images()",
"of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections import deque from",
"urllib.error import HTTPError from urllib.error import URLError __version__ = 'v0.2' __status__ = 'Development'",
"url to obtain link(s) to image(s). Raises exception if the link already ends",
"to image filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total for",
"is a deque of two keyed dictionaries. Example usage: ```python3 for image in",
"\"\"\" Check if the url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\"",
"def is_it_album(self): \"\"\" Check if the url points to an album. Examples: http(s)://imgur.com/a/[album_hash]",
"\"\"\" Return how many digits are there in a number. \"\"\" return len(str(number))",
"list is a list of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output",
"formats = set() for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value =",
"#MP4 = '.mp4' @classmethod def formats(cls): \"\"\" Return a set consisting of all",
"self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\" Remove",
"?grid to url. \"\"\" if self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else:",
"def prepare_images(self): \"\"\" Parses HTML from the provided url to obtain link(s) to",
"is returned. \"\"\" for extension in ImgurFileFormats.formats(): if extension in url: return extension",
"return re.match(pattern, candidate).group(0) def pack_image(self, url, filename): \"\"\" Returns a dictionary with image",
"the images attribute. \"\"\" return len(self.images) def numerate_images(self): \"\"\" Append ordinal number to",
"attribute[:2] != '__': value = getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value) return formats",
"HTTPError from urllib.error import URLError __version__ = 'v0.2' __status__ = 'Development' class ImgurException(Exception):",
"if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return grid",
"__version__ = 'v0.2' __status__ = 'Development' class ImgurException(Exception): \"\"\" This exception is raised",
"If images need to be downloaded in order they appear in an album,",
"up to date version of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from",
"url): \"\"\" Initiate Imgur object. \"\"\" self.url = self.sanitize(url) self.images = deque() def",
"callable. \"\"\" formats = set() for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__':",
"= self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def pack_image(self, url, filename):",
"extract image or album images from imgur link. \"\"\" def __init__(self, url): \"\"\"",
"ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\" Remove 'v' from .gifv \"\"\" pattern =",
"in clean: clean.append(filename) return clean def contains_extension(self, url): \"\"\" Check if the image",
"extension it is returned. Otherwise, None is returned. \"\"\" for extension in ImgurFileFormats.formats():",
"returned. \"\"\" for extension in ImgurFileFormats.formats(): if extension in url: return extension return",
"re from collections import deque from urllib.request import urlopen from urllib.error import HTTPError",
"provided url to obtain link(s) to image(s). Raises exception if the link already",
"url, 'filename' : filename} def number_of_images(self): \"\"\" Get the number of images from",
"Append ?grid to url. \"\"\" if self.is_it_album(): if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid'])",
"if not self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return self.url raise ImgurException('Cannot convert single",
"is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise",
"link is invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\" Contains extensions for file formats",
"filenames list is a list of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The",
"Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG = '.jpeg' PNG",
"def remove_duplicates(self, filenames): \"\"\" Remove duplicates from a list of tuples containing filenames",
"deque of two keyed dictionaries. Example usage: ```python3 for image in images: print(image['url'],",
"name from its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate",
"collections import deque from urllib.request import urlopen from urllib.error import HTTPError from urllib.error",
"return self.sanitize_gifv(url) return url raise ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\" Remove 'v'",
"single image into album grid.') def prepare_images(self): \"\"\" Parses HTML from the provided",
"self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\" Obtain and",
"\"\"\" Check if the supplied link is valid. If not, raise ImgurException. This",
"be callable. \"\"\" formats = set() for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] !=",
"link already ends with an extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url,",
"= imgur.images ``` imgur.images is a deque of two keyed dictionaries. Example usage:",
"__status__ = 'Development' class ImgurException(Exception): \"\"\" This exception is raised if supplied link",
"for image in images: print(image['url'], image['filename']) ``` If images need to be downloaded",
"import urlopen from urllib.error import HTTPError from urllib.error import URLError __version__ = 'v0.2'",
"URLError __version__ = 'v0.2' __status__ = 'Development' class ImgurException(Exception): \"\"\" This exception is",
"from urllib.request import urlopen from urllib.error import HTTPError from urllib.error import URLError __version__",
"extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url)",
"= ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def pack_image(self, url, filename): \"\"\" Returns a",
"on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG =",
"def formats(cls): \"\"\" Return a set consisting of all class attributes. Class attributes",
"in self.url) or ('/gallery/' in self.url) def is_it_grid(self): \"\"\" Check if the url",
"= [] for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def",
"valid. If not, raise ImgurException. This method checks only if the domain is",
"from imgur link. \"\"\" def __init__(self, url): \"\"\" Initiate Imgur object. \"\"\" self.url",
"an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url) or ('/gallery/' in",
"of direct image urls that could be used to download images. Example usage:",
"self.build_image_url_list(filenames_clean) for url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as e:",
"to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change",
"if the link already ends with an extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url):",
"\"\"\" return ('/a/' in self.url) or ('/gallery/' in self.url) def is_it_grid(self): \"\"\" Check",
"value = getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value) return formats class Imgur(object): \"\"\"",
"url): \"\"\" Check if the supplied link points to .gifv page. \"\"\" if",
"'.mp4' @classmethod def formats(cls): \"\"\" Return a set consisting of all class attributes.",
"containing filenames with extensions. \"\"\" clean = [] for filename in filenames: if",
"the url points to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def",
") return else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url):",
"= [] for filename in filenames: if filename not in clean: clean.append(filename) return",
"not callable(value): formats.add(value) return formats class Imgur(object): \"\"\" Imgur contains all necessary methods",
"images. Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ``` imgur.images",
"images need to be downloaded in order they appear in an album, their",
"len(self.images) def numerate_images(self): \"\"\" Append ordinal number to image filename. \"\"\" total =",
"downloaded in order they appear in an album, their filenames have to be",
"ImgurException(Exception): \"\"\" This exception is raised if supplied link is invalid. \"\"\" pass",
"contains extension. If there is an extension it is returned. Otherwise, None is",
"urls that could be used to download images. Example usage: ```python3 imgur =",
"supplied link is valid. If not, raise ImgurException. This method checks only if",
"('/a/' in self.url) or ('/gallery/' in self.url) def is_it_grid(self): \"\"\" Check if the",
"album grid.') def prepare_images(self): \"\"\" Parses HTML from the provided url to obtain",
"\"\"\" Initiate Imgur object. \"\"\" self.url = self.sanitize(url) self.images = deque() def sanitize(self,",
"'.webm' #MP4 = '.mp4' @classmethod def formats(cls): \"\"\" Return a set consisting of",
"filenames): \"\"\" Remove duplicates from a list of tuples containing filenames with extensions.",
"url contains extension. If there is an extension it is returned. Otherwise, None",
"the imgur.com site. It consists of three classes: ~ ImgurException ~ ImgurFileFormats ~",
"a list of tuples containing filenames with extensions. \"\"\" clean = [] for",
"\"\"\" Imgur contains all necessary methods to extract image or album images from",
"image in images: print(image['url'], image['filename']) ``` If images need to be downloaded in",
"self.sanitize_gifv(url) return url raise ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\" Remove 'v' from",
"when downloaded imgur.numerate_images() images = imgur.images ``` Note: For up to date version",
"page. \"\"\" if '.gifv' in url: return True return False def is_it_image(self): \"\"\"",
"are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg'",
"\"\"\" Get image file name from its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1",
"\"\"\" def __init__(self, url): \"\"\" Initiate Imgur object. \"\"\" self.url = self.sanitize(url) self.images",
"\"\"\" clean = [] for filename in filenames: if filename not in clean:",
"list of tuples containing filenames with extensions. \"\"\" clean = [] for filename",
"http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self): \"\"\" Check if",
"''.join([ ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self, number): \"\"\" Return how many digits",
"e: print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build list of direct links to images.",
"extensions for file formats that are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky",
"try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls =",
"\"\"\" Check if the url points to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\"",
"= self.sanitize(url) self.images = deque() def sanitize(self, url): \"\"\" Check if the supplied",
"of three classes: ~ ImgurException ~ ImgurFileFormats ~ Imgur Imgur is the main",
"= imgur.images ``` Note: For up to date version of this class visit:",
"filenames_with_duplicates = re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url in",
"import deque from urllib.request import urlopen from urllib.error import HTTPError from urllib.error import",
"urlopen from urllib.error import HTTPError from urllib.error import URLError __version__ = 'v0.2' __status__",
"Raises exception if the link already ends with an extension. \"\"\" if self.is_it_image():",
"PNG = '.png' GIF = '.gif' APNG = '.apng' TIFF = '.tiff' PDF",
"def build_image_url_list(self, filenames): \"\"\" Build list of direct links to images. Input filenames",
"the main class and it obtains list of direct image urls that could",
"\"\"\" urls = [] for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return",
"candidate).group(0) def pack_image(self, url, filename): \"\"\" Returns a dictionary with image url and",
"a set consisting of all class attributes. Class attributes must not be callable.",
"Check if the url points to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return",
"Class attributes must not be callable. \"\"\" formats = set() for attribute in",
"else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\" Obtain",
"except HTTPError as e: print(e.status) except URLError as e: print(e.reason) def build_image_url_list(self, filenames):",
"self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\" Obtain and parse html, and append",
"= ''.join([ ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self, number): \"\"\" Return how many",
"link points to .gifv page. \"\"\" if '.gifv' in url: return True return",
"PDF = '.pdf' XCF = '.xcf' #WEBM = '.webm' #MP4 = '.mp4' @classmethod",
"# https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self): \"\"\" Check if the url points",
"and corresponding filename. \"\"\" return {'url' : url, 'filename' : filename} def number_of_images(self):",
"= self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\" Obtain and parse html, and",
"is an extension it is returned. Otherwise, None is returned. \"\"\" for extension",
"imgur.images ``` Note: For up to date version of this class visit: https://github.com/petarGitNik/imgur-downloader",
"not be callable. \"\"\" formats = set() for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2]",
"HTML from the provided url to obtain link(s) to image(s). Raises exception if",
"\"\"\" total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total for index, image in",
"list of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg',",
"extension. If there is an extension it is returned. Otherwise, None is returned.",
"image(s). Raises exception if the link already ends with an extension. \"\"\" if",
"all class attributes. Class attributes must not be callable. \"\"\" formats = set()",
"= '{0:0%dd}' % total for index, image in enumerate(self.images, start=1): image['filename'] = ''.join([",
"filenames have to be numerated. Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images",
"the image url contains extension. If there is an extension it is returned.",
"Input filenames list is a list of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')].",
"The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for filename, extension",
"the number of images from the images attribute. \"\"\" return len(self.images) def numerate_images(self):",
"class attributes. Class attributes must not be callable. \"\"\" formats = set() for",
"def sanitize_gifv(self, url): \"\"\" Remove 'v' from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return",
"= '.tiff' PDF = '.pdf' XCF = '.xcf' #WEBM = '.webm' #MP4 =",
"imgur.prepare_images() imgur.images # These are not guaranteed to appear in order when downloaded",
"These are not guaranteed to appear in order when downloaded imgur.numerate_images() images =",
"= getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value) return formats class Imgur(object): \"\"\" Imgur",
"'.tiff' PDF = '.pdf' XCF = '.xcf' #WEBM = '.webm' #MP4 = '.mp4'",
"as e: print(e.reason) def build_image_url_list(self, filenames): \"\"\" Build list of direct links to",
"images from the images attribute. \"\"\" return len(self.images) def numerate_images(self): \"\"\" Append ordinal",
"= '.jpg' JPEG = '.jpeg' PNG = '.png' GIF = '.gif' APNG =",
"Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG = '.jpeg' PNG = '.png'",
"start=1): image['filename'] = ''.join([ ordinal.format(index), '-', image['filename'] ]) def digits_in_a_number(self, number): \"\"\" Return",
"extension])) return urls def remove_duplicates(self, filenames): \"\"\" Remove duplicates from a list of",
"with extensions. \"\"\" clean = [] for filename in filenames: if filename not",
"the provided url to obtain link(s) to image(s). Raises exception if the link",
"exception is raised if supplied link is invalid. \"\"\" pass class ImgurFileFormats(object): \"\"\"",
"is a list of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks",
"image url and corresponding filename. \"\"\" return {'url' : url, 'filename' : filename}",
"Check if the url points to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return",
"clean: clean.append(filename) return clean def contains_extension(self, url): \"\"\" Check if the image url",
"candidate = url.split('/')[-1] extension = self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0)",
"re.match(pattern, candidate).group(0) def pack_image(self, url, filename): \"\"\" Returns a dictionary with image url",
"to image(s). Raises exception if the link already ends with an extension. \"\"\"",
"\"\"\" Remove 'v' from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def",
"to /a/ in url. \"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append ?grid",
"'Development' class ImgurException(Exception): \"\"\" This exception is raised if supplied link is invalid.",
"is the main class and it obtains list of direct image urls that",
"extension return None def get_image_filename(self, url): \"\"\" Get image file name from its",
"obtains list of direct image urls that could be used to download images.",
"an album, their filenames have to be numerated. Full examples: ```python3 imgur =",
"the supplied link is valid. If not, raise ImgurException. This method checks only",
"It consists of three classes: ~ ImgurException ~ ImgurFileFormats ~ Imgur Imgur is",
"guaranteed to appear in order when downloaded imgur.numerate_images() images = imgur.images ``` Note:",
"return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append ?grid to url. \"\"\" if self.is_it_album():",
"url raise ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\" Remove 'v' from .gifv \"\"\"",
"imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ``` imgur.images is a deque of",
"for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename, extension])) return urls def remove_duplicates(self, filenames):",
"extension]) return re.match(pattern, candidate).group(0) def pack_image(self, url, filename): \"\"\" Returns a dictionary with",
"urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url",
"= 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\" Check if the supplied",
"Remove 'v' from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self,",
"classes for parsing the imgur.com site. It consists of three classes: ~ ImgurException",
"\"\"\" for extension in ImgurFileFormats.formats(): if extension in url: return extension return None",
"image file name from its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg",
"html, and append image dictionaries to image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try:",
") except HTTPError as e: print(e.status) except URLError as e: print(e.reason) def build_image_url_list(self,",
"in ImgurFileFormats.formats(): if extension in url: return extension return None def get_image_filename(self, url):",
"images. Input filenames list is a list of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8',",
"image filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total for index,",
"number to image filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total",
"['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for filename, extension in filenames: urls.append(''.join(['https://i.imgur.com/', filename,",
"pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean =",
"Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ``` imgur.images is",
"def turn_into_grid(self): \"\"\" Append ?grid to url. \"\"\" if self.is_it_album(): if not self.is_it_grid():",
"filename} def number_of_images(self): \"\"\" Get the number of images from the images attribute.",
"image['filename'] ]) def digits_in_a_number(self, number): \"\"\" Return how many digits are there in",
"[('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls =",
"GIF = '.gif' APNG = '.apng' TIFF = '.tiff' PDF = '.pdf' XCF",
"main class and it obtains list of direct image urls that could be",
"are not guaranteed to appear in order when downloaded imgur.numerate_images() images = imgur.images",
"and append image dictionaries to image deque. \"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html",
"file formats that are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\"",
"output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = [] for filename, extension in",
"an extension it is returned. Otherwise, None is returned. \"\"\" for extension in",
"ImgurFileFormats.formats(): if extension in url: return extension return None def get_image_filename(self, url): \"\"\"",
"self.is_it_grid(): return ''.join([self.change_gallery(), '?grid']) else: return self.url raise ImgurException('Cannot convert single image into",
"= re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates) urls = self.build_image_url_list(filenames_clean) for url in urls:",
"return urls def remove_duplicates(self, filenames): \"\"\" Remove duplicates from a list of tuples",
"tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\"",
"Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\" candidate = url.split('/')[-1] extension =",
"print(image['url'], image['filename']) ``` If images need to be downloaded in order they appear",
"http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self): \"\"\" Check",
"self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/ to /a/ in url. \"\"\" return self.url.replace('/gallery/',",
"None is returned. \"\"\" for extension in ImgurFileFormats.formats(): if extension in url: return",
"return else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\"",
"None def get_image_filename(self, url): \"\"\" Get image file name from its url. Examples:",
"to download images. Example usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images",
"their filenames have to be numerated. Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images()",
"http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in self.url) or ('/gallery/' in self.url) def is_it_grid(self): \"\"\"",
"into album grid.') def prepare_images(self): \"\"\" Parses HTML from the provided url to",
"links to images. Input filenames list is a list of tuples e.g. [('jedEzFL',",
"Check if the supplied link is valid. If not, raise ImgurException. This method",
"is_it_image(self): \"\"\" Check if the url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash]",
"self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid)",
"\"\"\" candidate = url.split('/')[-1] extension = self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return re.match(pattern,",
"of images from the images attribute. \"\"\" return len(self.images) def numerate_images(self): \"\"\" Append",
"if the url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})?",
"link. \"\"\" def __init__(self, url): \"\"\" Initiate Imgur object. \"\"\" self.url = self.sanitize(url)",
"Imgur object. \"\"\" self.url = self.sanitize(url) self.images = deque() def sanitize(self, url): \"\"\"",
"Check if the image url contains extension. If there is an extension it",
"in order when downloaded imgur.numerate_images() images = imgur.images ``` Note: For up to",
"grid.') def prepare_images(self): \"\"\" Parses HTML from the provided url to obtain link(s)",
"index, image in enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index), '-', image['filename'] ]) def",
"\"\"\" Contains extensions for file formats that are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload-",
"https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG = '.jpeg' PNG = '.png' GIF =",
"= set() for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value = getattr(ImgurFileFormats,",
"clean.append(filename) return clean def contains_extension(self, url): \"\"\" Check if the image url contains",
"classes: ~ ImgurException ~ ImgurFileFormats ~ Imgur Imgur is the main class and",
"filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total for index, image",
"]) def digits_in_a_number(self, number): \"\"\" Return how many digits are there in a",
"is_it_gifv(self, url): \"\"\" Check if the supplied link points to .gifv page. \"\"\"",
"'-', image['filename'] ]) def digits_in_a_number(self, number): \"\"\" Return how many digits are there",
"domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return url",
"with image url and corresponding filename. \"\"\" return {'url' : url, 'filename' :",
"direct links to images. Input filenames list is a list of tuples e.g.",
"self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self, url): \"\"\" Obtain and parse html, and append image",
"imgur.images ``` imgur.images is a deque of two keyed dictionaries. Example usage: ```python3",
"def sanitize(self, url): \"\"\" Check if the supplied link is valid. If not,",
".gifv page. \"\"\" if '.gifv' in url: return True return False def is_it_image(self):",
"For up to date version of this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re",
"\"\"\" JPG = '.jpg' JPEG = '.jpeg' PNG = '.png' GIF = '.gif'",
"convert single image into album grid.') def prepare_images(self): \"\"\" Parses HTML from the",
"ImgurException ~ ImgurFileFormats ~ Imgur Imgur is the main class and it obtains",
"= self.digits_in_a_number(len(self.images)) ordinal = '{0:0%dd}' % total for index, image in enumerate(self.images, start=1):",
"\"\"\" Returns a dictionary with image url and corresponding filename. \"\"\" return {'url'",
"= '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean = self.remove_duplicates(filenames_with_duplicates)",
"filename. \"\"\" return {'url' : url, 'filename' : filename} def number_of_images(self): \"\"\" Get",
"\"\"\" import re from collections import deque from urllib.request import urlopen from urllib.error",
"url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise ImgurException('Invalid link.') def sanitize_gifv(self, url):",
"file name from its url. Examples: https://i.imgur.com/jedEzFL.jpg -> jedEzFL.jpg https://i.imgur.com/jedEzFL.jpg?1 -> jedEzFL.jpg \"\"\"",
"Returns a dictionary with image url and corresponding filename. \"\"\" return {'url' :",
"\"\"\" formats = set() for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value",
"points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album()",
"'{0:0%dd}' % total for index, image in enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index),",
"``` If images need to be downloaded in order they appear in an",
"self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def parse_and_prepare_images(self,",
"ordinal = '{0:0%dd}' % total for index, image in enumerate(self.images, start=1): image['filename'] =",
"= 'Development' class ImgurException(Exception): \"\"\" This exception is raised if supplied link is",
"False def is_it_image(self): \"\"\" Check if the url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension]",
"methods to extract image or album images from imgur link. \"\"\" def __init__(self,",
"'.png' GIF = '.gif' APNG = '.apng' TIFF = '.tiff' PDF = '.pdf'",
"This exception is raised if supplied link is invalid. \"\"\" pass class ImgurFileFormats(object):",
"Change /gallery/ to /a/ in url. \"\"\" return self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\"",
"the domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url): return self.sanitize_gifv(url) return",
"in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as e: print(e.status) except URLError",
"def get_image_filename(self, url): \"\"\" Get image file name from its url. Examples: https://i.imgur.com/jedEzFL.jpg",
"direct image urls that could be used to download images. Example usage: ```python3",
"= '.jpeg' PNG = '.png' GIF = '.gif' APNG = '.apng' TIFF =",
"url, filename): \"\"\" Returns a dictionary with image url and corresponding filename. \"\"\"",
"to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def",
"in filenames: if filename not in clean: clean.append(filename) return clean def contains_extension(self, url):",
"build_image_url_list(self, filenames): \"\"\" Build list of direct links to images. Input filenames list",
"If there is an extension it is returned. Otherwise, None is returned. \"\"\"",
"url): \"\"\" Obtain and parse html, and append image dictionaries to image deque.",
"https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG = '.jpeg' PNG =",
"XCF = '.xcf' #WEBM = '.webm' #MP4 = '.mp4' @classmethod def formats(cls): \"\"\"",
"to extract image or album images from imgur link. \"\"\" def __init__(self, url):",
"formats.add(value) return formats class Imgur(object): \"\"\" Imgur contains all necessary methods to extract",
"if the url points to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid')",
"imgur.com site. It consists of three classes: ~ ImgurException ~ ImgurFileFormats ~ Imgur",
"numerated. Full examples: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are not",
"formats that are allowed on imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG",
"self.contains_extension(url) pattern = ''.join(['.+\\\\', extension]) return re.match(pattern, candidate).group(0) def pack_image(self, url, filename): \"\"\"",
"'filename' : filename} def number_of_images(self): \"\"\" Get the number of images from the",
"change_gallery(self): \"\"\" Change /gallery/ to /a/ in url. \"\"\" return self.url.replace('/gallery/', '/a/') def",
"return clean def contains_extension(self, url): \"\"\" Check if the image url contains extension.",
"obtain link(s) to image(s). Raises exception if the link already ends with an",
"points to a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\"",
"contains_extension(self, url): \"\"\" Check if the image url contains extension. If there is",
"def __init__(self, url): \"\"\" Initiate Imgur object. \"\"\" self.url = self.sanitize(url) self.images =",
"class ImgurException(Exception): \"\"\" This exception is raised if supplied link is invalid. \"\"\"",
"\"\"\" Parses HTML from the provided url to obtain link(s) to image(s). Raises",
"for attribute in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value = getattr(ImgurFileFormats, attribute) if",
"'.jpg' JPEG = '.jpeg' PNG = '.png' GIF = '.gif' APNG = '.apng'",
"to be downloaded in order they appear in an album, their filenames have",
"return False def is_it_image(self): \"\"\" Check if the url points to image. Examples:",
"import re from collections import deque from urllib.request import urlopen from urllib.error import",
"and it obtains list of direct image urls that could be used to",
"not guaranteed to appear in order when downloaded imgur.numerate_images() images = imgur.images ```",
"site. It consists of three classes: ~ ImgurException ~ ImgurFileFormats ~ Imgur Imgur",
"if filename not in clean: clean.append(filename) return clean def contains_extension(self, url): \"\"\" Check",
"'.xcf' #WEBM = '.webm' #MP4 = '.mp4' @classmethod def formats(cls): \"\"\" Return a",
"def digits_in_a_number(self, number): \"\"\" Return how many digits are there in a number.",
"urllib.request import urlopen from urllib.error import HTTPError from urllib.error import URLError __version__ =",
"if extension in url: return extension return None def get_image_filename(self, url): \"\"\" Get",
"class ImgurFileFormats(object): \"\"\" Contains extensions for file formats that are allowed on imgur.",
"= '.webm' #MP4 = '.mp4' @classmethod def formats(cls): \"\"\" Return a set consisting",
"self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid() self.parse_and_prepare_images(grid) return def",
"extension in ImgurFileFormats.formats(): if extension in url: return extension return None def get_image_filename(self,",
"in ImgurFileFormats.__dict__.keys(): if attribute[:2] != '__': value = getattr(ImgurFileFormats, attribute) if not callable(value):",
"deque() def sanitize(self, url): \"\"\" Check if the supplied link is valid. If",
"checks only if the domain is valid. \"\"\" if re.match('https?\\:\\/\\/(i\\.)?imgur\\.com\\/', url): if self.is_it_gifv(url):",
"Check if the url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" #",
"visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections import deque from urllib.request import urlopen",
"a grid view. Example: http(s)://imgur.com/a/[album_hash]?grid \"\"\" return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/",
"Obtain and parse html, and append image dictionaries to image deque. \"\"\" pattern",
"\"\"\" pattern = '\\{\"hash\":\"([a-zA-Z0-9]+)\".*?\"ext\":\"([\\.a-zA-Z0-9\\?\\#]+)\".*?\\}' try: html = urlopen(url).read().decode('utf-8') filenames_with_duplicates = re.findall(pattern, html) filenames_clean",
"= '.png' GIF = '.gif' APNG = '.apng' TIFF = '.tiff' PDF =",
"return self.url.endswith('?grid') def change_gallery(self): \"\"\" Change /gallery/ to /a/ in url. \"\"\" return",
"Parses HTML from the provided url to obtain link(s) to image(s). Raises exception",
"url in urls: self.images.append( self.pack_image(url, self.get_image_filename(url)) ) except HTTPError as e: print(e.status) except",
"TIFF = '.tiff' PDF = '.pdf' XCF = '.xcf' #WEBM = '.webm' #MP4",
"module contains classes for parsing the imgur.com site. It consists of three classes:",
".gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self, url): \"\"\" Check",
"= '.pdf' XCF = '.xcf' #WEBM = '.webm' #MP4 = '.mp4' @classmethod def",
"keyed dictionaries. Example usage: ```python3 for image in images: print(image['url'], image['filename']) ``` If",
"'?grid']) else: return self.url raise ImgurException('Cannot convert single image into album grid.') def",
"if self.is_it_gifv(url): return self.sanitize_gifv(url) return url raise ImgurException('Invalid link.') def sanitize_gifv(self, url): \"\"\"",
"parse_and_prepare_images(self, url): \"\"\" Obtain and parse html, and append image dictionaries to image",
"= '.apng' TIFF = '.tiff' PDF = '.pdf' XCF = '.xcf' #WEBM =",
"in url: return extension return None def get_image_filename(self, url): \"\"\" Get image file",
"filenames: if filename not in clean: clean.append(filename) return clean def contains_extension(self, url): \"\"\"",
"raise ImgurException. This method checks only if the domain is valid. \"\"\" if",
"appear in an album, their filenames have to be numerated. Full examples: ```python3",
"a dictionary with image url and corresponding filename. \"\"\" return {'url' : url,",
"images attribute. \"\"\" return len(self.images) def numerate_images(self): \"\"\" Append ordinal number to image",
"JPEG = '.jpeg' PNG = '.png' GIF = '.gif' APNG = '.apng' TIFF",
"if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url)) ) return else: self.parse_and_prepare_images(self.url) return grid = self.turn_into_grid()",
"return {'url' : url, 'filename' : filename} def number_of_images(self): \"\"\" Get the number",
"formats(cls): \"\"\" Return a set consisting of all class attributes. Class attributes must",
"of all class attributes. Class attributes must not be callable. \"\"\" formats =",
"Imgur(object): \"\"\" Imgur contains all necessary methods to extract image or album images",
"__init__(self, url): \"\"\" Initiate Imgur object. \"\"\" self.url = self.sanitize(url) self.images = deque()",
"for index, image in enumerate(self.images, start=1): image['filename'] = ''.join([ ordinal.format(index), '-', image['filename'] ])",
"the url points to an album. Examples: http(s)://imgur.com/a/[album_hash] http(s)://imgur.com/gallery/[album_hash] \"\"\" return ('/a/' in",
"return self.url raise ImgurException('Cannot convert single image into album grid.') def prepare_images(self): \"\"\"",
"\"\"\" Append ordinal number to image filename. \"\"\" total = self.digits_in_a_number(len(self.images)) ordinal =",
"```python3 for image in images: print(image['url'], image['filename']) ``` If images need to be",
"the url points to image. Examples: http(s)://i.imgur.com/[image_hash].[extension] http(s)://i.imgur.com/[image_hash] http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return",
"not in clean: clean.append(filename) return clean def contains_extension(self, url): \"\"\" Check if the",
"images = imgur.images ``` imgur.images is a deque of two keyed dictionaries. Example",
"two keyed dictionaries. Example usage: ```python3 for image in images: print(image['url'], image['filename']) ```",
"Return a set consisting of all class attributes. Class attributes must not be",
"self.url.replace('/gallery/', '/a/') def turn_into_grid(self): \"\"\" Append ?grid to url. \"\"\" if self.is_it_album(): if",
"returned. Otherwise, None is returned. \"\"\" for extension in ImgurFileFormats.formats(): if extension in",
"self.sanitize(url) self.images = deque() def sanitize(self, url): \"\"\" Check if the supplied link",
"appear in order when downloaded imgur.numerate_images() images = imgur.images ``` Note: For up",
"'v0.2' __status__ = 'Development' class ImgurException(Exception): \"\"\" This exception is raised if supplied",
"{'url' : url, 'filename' : filename} def number_of_images(self): \"\"\" Get the number of",
"usage: ```python3 imgur = Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() images = imgur.images ``` imgur.images is a",
"'.jpg'), ('lciC5G8', '.jpg')]. The output looks like: ['https://i.imgur.com/jedEzFL.jpg', 'https://i.imgur.com/lciC5G8.jpg'] \"\"\" urls = []",
"imgur.images is a deque of two keyed dictionaries. Example usage: ```python3 for image",
"or album images from imgur link. \"\"\" def __init__(self, url): \"\"\" Initiate Imgur",
"http(s)://imgur.com/[image_hash] \"\"\" # https*\\:\\/\\/(i\\.)?imgur\\.com\\/[a-zA-Z0-9]*(\\.[a-zA-Z]{1,4})? return not self.is_it_album() def is_it_album(self): \"\"\" Check if the",
"a list of tuples e.g. [('jedEzFL', '.jpg'), ('lciC5G8', '.jpg')]. The output looks like:",
"already ends with an extension. \"\"\" if self.is_it_image(): if self.contains_extension(self.url): self.images.append( self.pack_image(self.url, self.get_image_filename(self.url))",
"to appear in order when downloaded imgur.numerate_images() images = imgur.images ``` Note: For",
"number): \"\"\" Return how many digits are there in a number. \"\"\" return",
"'v' from .gifv \"\"\" pattern = 'https?\\:\\/\\/i\\.imgur\\.com\\/[a-zA-Z0-9]+\\.gif' return re.match(pattern, url).group(0) def is_it_gifv(self, url):",
"this class visit: https://github.com/petarGitNik/imgur-downloader \"\"\" import re from collections import deque from urllib.request",
"if attribute[:2] != '__': value = getattr(ImgurFileFormats, attribute) if not callable(value): formats.add(value) return",
"Otherwise, None is returned. \"\"\" for extension in ImgurFileFormats.formats(): if extension in url:",
"pass class ImgurFileFormats(object): \"\"\" Contains extensions for file formats that are allowed on",
"imgur. Source: https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- Archived: http://archive.is/89Uky https://web.archive.org/web/20170222111303/https://help.imgur.com/hc/en-us/articles/115000083326-What-files-can-I-upload- \"\"\" JPG = '.jpg' JPEG = '.jpeg'",
"= Imgur('http://imgur.com/gallery/vTTHZ') imgur.prepare_images() imgur.images # These are not guaranteed to appear in order",
"''.join([self.change_gallery(), '?grid']) else: return self.url raise ImgurException('Cannot convert single image into album grid.')"
] |
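The class stops at producing url/filename pairs; actually fetching the files is left to the caller. A minimal driver is sketched below under two assumptions: the module above is saved as imgur_downloader.py (a name invented here), and plain urllib.request.urlretrieve is an acceptable fetcher. The gallery URL is the one reused from the docstring examples.

```python3
# Hypothetical driver script; "imgur_downloader" is an assumed module name.
from urllib.request import urlretrieve

from imgur_downloader import Imgur

imgur = Imgur('http://imgur.com/gallery/vTTHZ')
imgur.prepare_images()
imgur.numerate_images()  # keep album order encoded in the filenames
for image in imgur.images:
    # each entry is {'url': ..., 'filename': ...}
    urlretrieve(image['url'], image['filename'])
```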
from .analysis_tools import load_databases  # noqa: E501
from .plotting import confidence_ellipse, scatter, plot_roc_curve, plot_lda_analysis  # noqa: E501
import pefile
import numpy as np
# import os

execs = [
    "1F2EB7B090018D975E6D9B40868C94CA",
    "33DE5067A433A6EC5C328067DC18EC37",
    "65018CD542145A3792BA09985734C12A",
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "<KEY>",
    "A316D5AECA269CA865077E7FFF356E7D",
    "<KEY>",
    "AL65_DB05DF0498B59B42A8E493CF3C10C578",
    "B07322743778B5868475DBE66EEDAC4F",
    "B98hX8E8622C393D7E832D39E620EAD5D3B49",
    "BVJ2D9FBF759F527AF373E34673DC3ACA462",
    "DS22_A670D13D4D014169C4080328B8FEB86",
    "EEE99EC8AA67B05407C01094184C33D2B5A44",
    "F6655E39465C2FF5B016980D918EA028",
    "F8437E44748D2C3FCF84019766F4E6DC",
    "<KEY>",
    "FGTR43_EF8E0FB20E7228C7492CCDC59D87C690",
    "<KEY>",
    "FTTR9EA3C16194CE354C244C1B74C46CD92E",
    "<KEY>",
    "GFT4_7DDD3D72EAD03C7518F5D47650C8572",
    "<KEY>",
    "<KEY>",
    "JKK8CA6FE7A1315AF5AFEAC2961460A80569",
    "<KEY>",
    "<KEY>",
    "L11_1415EB8519D13328091CC5C76A624E3D",
    "NBV_8B75BCBFF174C25A0161F30758509A44",
    "NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4",
    "PL98_BD8B082B7711BC980252F988BB0CA936",
    "POL55_A4F1ECC4D25B33395196B5D51A06790",
    "QW2_4C6BDDCCA2695D6202DF38708E14FC7E",
    "RTC_7F85D7F628CE62D1D8F7B39D8940472",
    "SAM_B659D71AE168E774FAAF38DB30F4A84",
    "TG78Z__727A6800991EEAD454E53E8AF164A99C",
    "VBMM9_149B7BD7218AAB4E257D28469FDDB0D",
    "VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E",
]

prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [],
          "sectionVS": [], "sectionSR": [], "kernel32": [], "msvcrt": [], "shell32": [],
          "user32": [], "ws232": [], "ADVAPI32": [], "GDI32": [], "KERNEL32": [],
          "NETAPI32": [], "PSAPI": [], "WININET": [], "ntdll": [], "TimeStamp": None}

# pe = pefile.PE("65018CD542145A3792BA09985734C12A")
# algo = [10, 20, 30, 40, 50]

granPrueba = []
entrysList = []

for a in execs:
    sectionNames = []
    sectionVA = []
    sectionVS = []
    sectionSR = []
    kernel32 = []
    msvcrt = []
    shell32 = []
    user32 = []
    ws232 = []
    ADVAPI32 = []
    GDI32 = []
    KERNEL32 = []
    NETAPI32 = []
    PSAPI = []
    WININET = []
    ntdll = []
    # print(execs.index(a) + 1)
    print("a")
    print(a)
    c = execs.index(a) + 1
    pe = pefile.PE(a)
    prueba["correlativo"] = c
    prueba["nameExec"] = a
    print(c)
    print("Secciones")
    for section in pe.sections:
        print(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize),
              section.SizeOfRawData)
        b = section.Name
        sectionNames.append(b.decode('utf-8'))
        sectionVA.append(section.VirtualAddress)
        sectionVS.append(section.Misc_VirtualSize)
        sectionSR.append(section.SizeOfRawData)
    prueba["sectionName"] = sectionNames
    prueba["sectionVA"] = sectionVA
    prueba["sectionVS"] = sectionVS
    prueba["sectionSR"] = sectionSR
    print()
    print()
    print("Entradas")
    for entry in pe.DIRECTORY_ENTRY_IMPORT:
        print('Llamadas DLL:')
        print(entry.dll)
        l = entry.dll
        print('Llamadas a funciones:')
        entrysList.append(str(l.decode('utf-8')))
        if str(entry.dll) == "b'KERNEL32.DLL'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                kernel32.append(x.decode('utf-8'))
            prueba["kernel32"] = kernel32
        elif str(entry.dll) == "b'ADVAPI32.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                ADVAPI32.append(x.decode('utf-8'))
            prueba["ADVAPI32"] = ADVAPI32
        elif str(entry.dll) == "b'GDI32.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                GDI32.append(x.decode('utf-8'))
            prueba["GDI32"] = GDI32
        elif str(entry.dll) == "b'KERNEL32.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                KERNEL32.append(x.decode('utf-8'))
            prueba["KERNEL32"] = KERNEL32
        elif str(entry.dll) == "b'NETAPI32.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                NETAPI32.append(x.decode('utf-8'))
            prueba["NETAPI32"] = NETAPI32
        elif str(entry.dll) == "b'PSAPI.DLL'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                PSAPI.append(x.decode('utf-8'))
            prueba["PSAPI"] = PSAPI
        elif str(entry.dll) == "b'WININET.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                WININET.append(x.decode('utf-8'))
            prueba["WININET"] = WININET
        elif str(entry.dll) == "b'ntdll.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                ntdll.append(x.decode('utf-8'))
            prueba["ntdll"] = ntdll
        elif str(entry.dll) == "b'MSVCRT.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                msvcrt.append(x.decode('utf-8'))
            prueba["msvcrt"] = msvcrt
        elif str(entry.dll) == "b'SHELL32.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                shell32.append(x.decode('utf-8'))
            prueba["shell32"] = shell32
        elif str(entry.dll) == "b'USER32.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                user32.append(x.decode('utf-8'))
            prueba["user32"] = user32
        elif str(entry.dll) == "b'WS2_32.dll'":
            for function in entry.imports:
                x = function.name
                print('\t', x.decode('utf-8'))
                ws232.append(x.decode('utf-8'))
            prueba["ws232"] = ws232
    # listamalware = os.listdir(path)
    print()
    print()
    print("TimeStamp")
    print("TimeDateStamp : " + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1])
    z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]
    print(z)
    prueba["TimeStamp"] = z
    print(c)
    # print()
    # print()
    # print(pe.FILE_HEADER.NumberOfSections)
    granPrueba.append(prueba)
    prueba = {"correlativo": None, "nameExec": None, "sectionName": [], "sectionVA": [],
              "sectionVS": [], "sectionSR": None, "kernel32": None, "msvcrt": None,
              "shell32": None, "user32": None, "ws232": None, "TimeStamp": None}

# print(granPrueba)
import pandas as pd

df = pd.DataFrame(granPrueba)
print(df)
# print(entrysList)

def unique(list1):
    x = np.array(list1)
    print(np.unique(x))

unique(entrysList)
df.to_csv("dataset.csv")
"ws232 # listamalware = os.listdir(path) print() print() print(\"TimeStamp\") print(\"TimeDateStamp : \" + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1])",
"prueba[\"WININET\"] = WININET elif str(entry.dll) == \"b'ntdll.dll'\": for function in entry.imports: x =",
"entry.imports: x = function.name print('\\t', x.decode('utf-8')) ws232.append(x.decode('utf-8')) prueba[\"ws232\"] = ws232 # listamalware =",
"[] sectionVA = [] sectionVS = [] sectionSR = [] kernel32 = []",
"\"TimeStamp\": None} # pe = pefile.PE(\"65018CD542145A3792BA09985734C12A\") # algo = [10, 20, 30, 40,",
"sectionNames.append(b.decode('utf-8')) sectionVA.append(section.VirtualAddress) sectionVS.append(section.Misc_VirtualSize) sectionSR.append(section.SizeOfRawData) prueba[\"sectionName\"] = sectionNames prueba[\"sectionVA\"] = sectionVA prueba[\"sectionVS\"] = sectionVS",
"\"b'PSAPI.DLL'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) PSAPI.append(x.decode('utf-8')) prueba[\"PSAPI\"] =",
"\"sectionVS\": [], \"sectionSR\": None, \"kernel32\": None, \"msvcrt\": None, \"shell32\": None, \"user32\": None, \"ws232\":",
"function.name print('\\t', x.decode('utf-8')) KERNEL32.append(x.decode('utf-8')) prueba[\"KERNEL32\"] = KERNEL32 elif str(entry.dll) == \"b'NETAPI32.dll'\": for function",
"c = execs.index(a) + 1 pe = pefile.PE(a) prueba[\"correlativo\"] = c prueba[\"nameExec\"] =",
"prueba[\"shell32\"] = shell32 elif str(entry.dll) == \"b'USER32.dll'\": for function in entry.imports: x =",
"section.SizeOfRawData) b = section.Name sectionNames.append(b.decode('utf-8')) sectionVA.append(section.VirtualAddress) sectionVS.append(section.Misc_VirtualSize) sectionSR.append(section.SizeOfRawData) prueba[\"sectionName\"] = sectionNames prueba[\"sectionVA\"] =",
"= os.listdir(path) print() print() print(\"TimeStamp\") print(\"TimeDateStamp : \" + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]",
"\"shell32\": [], \"user32\": [], \"ws232\": [], \"ADVAPI32\": [], \"GDI32\": [], \"KERNEL32\": [], \"NETAPI32\":",
"function.name print('\\t', x.decode('utf-8')) PSAPI.append(x.decode('utf-8')) prueba[\"PSAPI\"] = PSAPI elif str(entry.dll) == \"b'WININET.dll'\": for function",
"prueba[\"sectionSR\"] = sectionSR print() print() print(\"Entradas\") for entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print",
"in entry.imports: x = function.name print('\\t', x.decode('utf-8')) NETAPI32.append(x.decode('utf-8')) prueba[\"NETAPI32\"] = NETAPI32 elif str(entry.dll)",
"str(entry.dll) == \"b'ntdll.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) ntdll.append(x.decode('utf-8'))",
"= [] entrysList = [] for a in execs: sectionNames = [] sectionVA",
"[], \"sectionSR\": [], \"kernel32\": [], \"msvcrt\": [], \"shell32\": [], \"user32\": [], \"ws232\": [],",
"= function.name print('\\t', x.decode('utf-8')) user32.append(x.decode('utf-8')) prueba[\"user32\"] = user32 elif str(entry.dll) == \"b'WS2_32.dll'\": for",
"kernel32 elif str(entry.dll) == \"b'ADVAPI32.dll'\": for function in entry.imports: x = function.name print('\\t',",
"execs.index(a) + 1 pe = pefile.PE(a) prueba[\"correlativo\"] = c prueba[\"nameExec\"] = a print(c)",
"= [] WININET = [] ntdll = [] # print(execs.index(a) + 1) print(\"a\")",
"[], \"NETAPI32\": [], \"PSAPI\": [], \"WININET\": [], \"ntdll\": [], \"TimeStamp\": None} # pe",
"import pefile import numpy as np # import os execs = [ \"1F2EB7B090018D975E6D9B40868C94CA\",",
"os execs = [ \"1F2EB7B090018D975E6D9B40868C94CA\", \"33DE5067A433A6EC5C328067DC18EC37\", \"65018CD542145A3792BA09985734C12A\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"A316D5AECA269CA865077E7FFF356E7D\",",
"in entry.imports: x = function.name print('\\t', x.decode('utf-8')) PSAPI.append(x.decode('utf-8')) prueba[\"PSAPI\"] = PSAPI elif str(entry.dll)",
"\"ntdll\": [], \"TimeStamp\": None} # pe = pefile.PE(\"65018CD542145A3792BA09985734C12A\") # algo = [10, 20,",
"for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba[\"kernel32\"] = kernel32",
"execs = [ \"1F2EB7B090018D975E6D9B40868C94CA\", \"33DE5067A433A6EC5C328067DC18EC37\", \"65018CD542145A3792BA09985734C12A\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"A316D5AECA269CA865077E7FFF356E7D\", \"<KEY>\",",
"x = function.name print('\\t', x.decode('utf-8')) shell32.append(x.decode('utf-8')) prueba[\"shell32\"] = shell32 elif str(entry.dll) == \"b'USER32.dll'\":",
"== \"b'PSAPI.DLL'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) PSAPI.append(x.decode('utf-8')) prueba[\"PSAPI\"]",
"\"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690\", \"<KEY>\", \"FTTR9EA3C16194CE354C244C1B74C46CD92E\", \"<KEY>\", \"GFT4_7DDD3D72EAD03C7518F5D47650C8572\", \"<KEY>\", \"<KEY>\", \"JKK8CA6FE7A1315AF5AFEAC2961460A80569\", \"<KEY>\", \"<KEY>\", \"L11_1415EB8519D13328091CC5C76A624E3D\", \"NBV_8B75BCBFF174C25A0161F30758509A44\", \"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4\",",
"x = function.name print('\\t', x.decode('utf-8')) ADVAPI32.append(x.decode('utf-8')) prueba[\"ADVAPI32\"] = ADVAPI32 elif str(entry.dll) == \"b'GDI32.dll'\":",
"= shell32 elif str(entry.dll) == \"b'USER32.dll'\": for function in entry.imports: x = function.name",
"print() # print(pe.FILE_HEADER.NumberOfSections) granPrueba.append(prueba) prueba = {\"correlativo\": None, \"nameExec\": None, \"sectionName\": [], \"sectionVA\":",
"\"sectionSR\": None, \"kernel32\": None, \"msvcrt\": None, \"shell32\": None, \"user32\": None, \"ws232\": None, \"TimeStamp\":",
"str(entry.dll) == \"b'ADVAPI32.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) ADVAPI32.append(x.decode('utf-8'))",
"[], \"shell32\": [], \"user32\": [], \"ws232\": [], \"ADVAPI32\": [], \"GDI32\": [], \"KERNEL32\": [],",
"NETAPI32 elif str(entry.dll) == \"b'PSAPI.DLL'\": for function in entry.imports: x = function.name print('\\t',",
"= pefile.PE(a) prueba[\"correlativo\"] = c prueba[\"nameExec\"] = a print(c) print(\"Secciones\") for section in",
"\"sectionVS\": [], \"sectionSR\": [], \"kernel32\": [], \"msvcrt\": [], \"shell32\": [], \"user32\": [], \"ws232\":",
"print('Llamadas DLL:') print (entry.dll) l = entry.dll print('Llamadas a funciones:') entrysList.append(str(l.decode('utf-8'))) if str(entry.dll)",
"\"AL65_DB05DF0498B59B42A8E493CF3C10C578\", \"B07322743778B5868475DBE66EEDAC4F\", \"B98hX8E8622C393D7E832D39E620EAD5D3B49\", \"BVJ2D9FBF759F527AF373E34673DC3ACA462\", \"DS22_A670D13D4D014169C4080328B8FEB86\", \"EEE99EC8AA67B05407C01094184C33D2B5A44\", \"F6655E39465C2FF5B016980D918EA028\", \"F8437E44748D2C3FCF84019766F4E6DC\", \"<KEY>\", \"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690\", \"<KEY>\", \"FTTR9EA3C16194CE354C244C1B74C46CD92E\", \"<KEY>\",",
"PSAPI elif str(entry.dll) == \"b'WININET.dll'\": for function in entry.imports: x = function.name print('\\t',",
"str(entry.dll) == \"b'MSVCRT.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) msvcrt.append(x.decode('utf-8'))",
"None, \"nameExec\": None, \"sectionName\": [], \"sectionVA\": [], \"sectionVS\": [], \"sectionSR\": None, \"kernel32\": None,",
"\"33DE5067A433A6EC5C328067DC18EC37\", \"65018CD542145A3792BA09985734C12A\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"A316D5AECA269CA865077E7FFF356E7D\", \"<KEY>\", \"AL65_DB05DF0498B59B42A8E493CF3C10C578\", \"B07322743778B5868475DBE66EEDAC4F\", \"B98hX8E8622C393D7E832D39E620EAD5D3B49\", \"BVJ2D9FBF759F527AF373E34673DC3ACA462\",",
"\"ws232\": [], \"ADVAPI32\": [], \"GDI32\": [], \"KERNEL32\": [], \"NETAPI32\": [], \"PSAPI\": [], \"WININET\":",
"granPrueba = [] entrysList = [] for a in execs: sectionNames = []",
"\"VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E\", ] prueba = {\"correlativo\": None, \"nameExec\": None, \"sectionName\": [], \"sectionVA\": [], \"sectionVS\":",
"NETAPI32 = [] PSAPI = [] WININET = [] ntdll = [] #",
"elif str(entry.dll) == \"b'ntdll.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8'))",
"for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] = WININET",
"KERNEL32 elif str(entry.dll) == \"b'NETAPI32.dll'\": for function in entry.imports: x = function.name print('\\t',",
"\"PSAPI\": [], \"WININET\": [], \"ntdll\": [], \"TimeStamp\": None} # pe = pefile.PE(\"65018CD542145A3792BA09985734C12A\") #",
"import pandas as pd df = pd.DataFrame(granPrueba) print(df) # print(entrysList) def unique(list1): x",
"= function.name print('\\t', x.decode('utf-8')) GDI32.append(x.decode('utf-8')) prueba[\"GDI32\"] = GDI32 elif str(entry.dll) == \"b'KERNEL32.dll'\": for",
"= sectionVS prueba[\"sectionSR\"] = sectionSR print() print() print(\"Entradas\") for entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas",
"= [] kernel32 = [] msvcrt = [] shell32 = [] user32 =",
"20, 30, 40, 50] granPrueba = [] entrysList = [] for a in",
"print(\"a\") print(a) c = execs.index(a) + 1 pe = pefile.PE(a) prueba[\"correlativo\"] = c",
"x = function.name print('\\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba[\"kernel32\"] = kernel32 elif str(entry.dll) == \"b'ADVAPI32.dll'\":",
"in entry.imports: x = function.name print('\\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba[\"kernel32\"] = kernel32 elif str(entry.dll)",
"print() print() print(\"TimeStamp\") print(\"TimeDateStamp : \" + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1] print(z) prueba[\"TimeStamp\"]",
"listamalware = os.listdir(path) print() print() print(\"TimeStamp\") print(\"TimeDateStamp : \" + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z =",
"\"<KEY>\", \"L11_1415EB8519D13328091CC5C76A624E3D\", \"NBV_8B75BCBFF174C25A0161F30758509A44\", \"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4\", \"PL98_BD8B082B7711BC980252F988BB0CA936\", \"POL55_A4F1ECC4D25B33395196B5D51A06790\", \"QW2_4C6BDDCCA2695D6202DF38708E14FC7E\", \"RTC_7F85D7F628CE62D1D8F7B39D8940472\", \"SAM_B659D71AE168E774FAAF38DB30F4A84\", \"TG78Z__727A6800991EEAD454E53E8AF164A99C\", \"VBMM9_149B7BD7218AAB4E257D28469FDDB0D\", \"VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E\", ]",
"None, \"sectionName\": [], \"sectionVA\": [], \"sectionVS\": [], \"sectionSR\": [], \"kernel32\": [], \"msvcrt\": [],",
"print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] = WININET elif str(entry.dll) == \"b'ntdll.dll'\": for function in",
"print('\\t', x.decode('utf-8')) user32.append(x.decode('utf-8')) prueba[\"user32\"] = user32 elif str(entry.dll) == \"b'WS2_32.dll'\": for function in",
"x.decode('utf-8')) ADVAPI32.append(x.decode('utf-8')) prueba[\"ADVAPI32\"] = ADVAPI32 elif str(entry.dll) == \"b'GDI32.dll'\": for function in entry.imports:",
"ADVAPI32.append(x.decode('utf-8')) prueba[\"ADVAPI32\"] = ADVAPI32 elif str(entry.dll) == \"b'GDI32.dll'\": for function in entry.imports: x",
"pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print (entry.dll) l = entry.dll print('Llamadas a funciones:') entrysList.append(str(l.decode('utf-8'))) if",
"function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba[\"kernel32\"] = kernel32 elif",
"[] shell32 = [] user32 = [] ws232 = [] ADVAPI32 = []",
"[], \"msvcrt\": [], \"shell32\": [], \"user32\": [], \"ws232\": [], \"ADVAPI32\": [], \"GDI32\": [],",
"shell32 = [] user32 = [] ws232 = [] ADVAPI32 = [] GDI32",
"elif str(entry.dll) == \"b'MSVCRT.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8'))",
"pandas as pd df = pd.DataFrame(granPrueba) print(df) # print(entrysList) def unique(list1): x =",
"pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1] print(z) prueba[\"TimeStamp\"] = z print(c) # print() # print() # print(pe.FILE_HEADER.NumberOfSections) granPrueba.append(prueba)",
"50] granPrueba = [] entrysList = [] for a in execs: sectionNames =",
"function.name print('\\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba[\"kernel32\"] = kernel32 elif str(entry.dll) == \"b'ADVAPI32.dll'\": for function",
"[] PSAPI = [] WININET = [] ntdll = [] # print(execs.index(a) +",
"\"kernel32\": None, \"msvcrt\": None, \"shell32\": None, \"user32\": None, \"ws232\": None, \"TimeStamp\": None} #",
"function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] = WININET elif str(entry.dll) == \"b'ntdll.dll'\": for function",
"= [] PSAPI = [] WININET = [] ntdll = [] # print(execs.index(a)",
"\"<KEY>\", \"GFT4_7DDD3D72EAD03C7518F5D47650C8572\", \"<KEY>\", \"<KEY>\", \"JKK8CA6FE7A1315AF5AFEAC2961460A80569\", \"<KEY>\", \"<KEY>\", \"L11_1415EB8519D13328091CC5C76A624E3D\", \"NBV_8B75BCBFF174C25A0161F30758509A44\", \"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4\", \"PL98_BD8B082B7711BC980252F988BB0CA936\", \"POL55_A4F1ECC4D25B33395196B5D51A06790\", \"QW2_4C6BDDCCA2695D6202DF38708E14FC7E\",",
"print() # print() # print(pe.FILE_HEADER.NumberOfSections) granPrueba.append(prueba) prueba = {\"correlativo\": None, \"nameExec\": None, \"sectionName\":",
"in entry.imports: x = function.name print('\\t', x.decode('utf-8')) KERNEL32.append(x.decode('utf-8')) prueba[\"KERNEL32\"] = KERNEL32 elif str(entry.dll)",
"\"b'KERNEL32.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) KERNEL32.append(x.decode('utf-8')) prueba[\"KERNEL32\"] =",
"str(entry.dll) == \"b'USER32.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) user32.append(x.decode('utf-8'))",
"= a print(c) print(\"Secciones\") for section in pe.sections: print(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), section.SizeOfRawData) b",
"str(entry.dll) == \"b'WININET.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8'))",
"[], \"kernel32\": [], \"msvcrt\": [], \"shell32\": [], \"user32\": [], \"ws232\": [], \"ADVAPI32\": [],",
"x = function.name print('\\t', x.decode('utf-8')) GDI32.append(x.decode('utf-8')) prueba[\"GDI32\"] = GDI32 elif str(entry.dll) == \"b'KERNEL32.dll'\":",
"x.decode('utf-8')) NETAPI32.append(x.decode('utf-8')) prueba[\"NETAPI32\"] = NETAPI32 elif str(entry.dll) == \"b'PSAPI.DLL'\": for function in entry.imports:",
"[], \"WININET\": [], \"ntdll\": [], \"TimeStamp\": None} # pe = pefile.PE(\"65018CD542145A3792BA09985734C12A\") # algo",
"entry.imports: x = function.name print('\\t', x.decode('utf-8')) ntdll.append(x.decode('utf-8')) prueba[\"ntdll\"] = ntdll elif str(entry.dll) ==",
"prueba[\"KERNEL32\"] = KERNEL32 elif str(entry.dll) == \"b'NETAPI32.dll'\": for function in entry.imports: x =",
"for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) KERNEL32.append(x.decode('utf-8')) prueba[\"KERNEL32\"] = KERNEL32",
"pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1] print(z) prueba[\"TimeStamp\"] = z print(c) # print() # print()",
"shell32 elif str(entry.dll) == \"b'USER32.dll'\": for function in entry.imports: x = function.name print('\\t',",
"prueba[\"ntdll\"] = ntdll elif str(entry.dll) == \"b'MSVCRT.dll'\": for function in entry.imports: x =",
"= [] # print(execs.index(a) + 1) print(\"a\") print(a) c = execs.index(a) + 1",
"= [] for a in execs: sectionNames = [] sectionVA = [] sectionVS",
"\"b'WININET.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] =",
"function.name print('\\t', x.decode('utf-8')) GDI32.append(x.decode('utf-8')) prueba[\"GDI32\"] = GDI32 elif str(entry.dll) == \"b'KERNEL32.dll'\": for function",
"print('\\t', x.decode('utf-8')) ntdll.append(x.decode('utf-8')) prueba[\"ntdll\"] = ntdll elif str(entry.dll) == \"b'MSVCRT.dll'\": for function in",
"print('Llamadas a funciones:') entrysList.append(str(l.decode('utf-8'))) if str(entry.dll) == \"b'KERNEL32.DLL'\": for function in entry.imports: x",
"sectionNames prueba[\"sectionVA\"] = sectionVA prueba[\"sectionVS\"] = sectionVS prueba[\"sectionSR\"] = sectionSR print() print() print(\"Entradas\")",
"PSAPI.append(x.decode('utf-8')) prueba[\"PSAPI\"] = PSAPI elif str(entry.dll) == \"b'WININET.dll'\": for function in entry.imports: x",
"x = function.name print('\\t', x.decode('utf-8')) msvcrt.append(x.decode('utf-8')) prueba[\"msvcrt\"] = msvcrt elif str(entry.dll) == \"b'SHELL32.dll'\":",
"None, \"nameExec\": None, \"sectionName\": [], \"sectionVA\": [], \"sectionVS\": [], \"sectionSR\": [], \"kernel32\": [],",
"sectionSR print() print() print(\"Entradas\") for entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print (entry.dll) l",
"in execs: sectionNames = [] sectionVA = [] sectionVS = [] sectionSR =",
"function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) ntdll.append(x.decode('utf-8')) prueba[\"ntdll\"] = ntdll elif",
"[] for a in execs: sectionNames = [] sectionVA = [] sectionVS =",
"\"A316D5AECA269CA865077E7FFF356E7D\", \"<KEY>\", \"AL65_DB05DF0498B59B42A8E493CF3C10C578\", \"B07322743778B5868475DBE66EEDAC4F\", \"B98hX8E8622C393D7E832D39E620EAD5D3B49\", \"BVJ2D9FBF759F527AF373E34673DC3ACA462\", \"DS22_A670D13D4D014169C4080328B8FEB86\", \"EEE99EC8AA67B05407C01094184C33D2B5A44\", \"F6655E39465C2FF5B016980D918EA028\", \"F8437E44748D2C3FCF84019766F4E6DC\", \"<KEY>\", \"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690\", \"<KEY>\",",
"\"POL55_A4F1ECC4D25B33395196B5D51A06790\", \"QW2_4C6BDDCCA2695D6202DF38708E14FC7E\", \"RTC_7F85D7F628CE62D1D8F7B39D8940472\", \"SAM_B659D71AE168E774FAAF38DB30F4A84\", \"TG78Z__727A6800991EEAD454E53E8AF164A99C\", \"VBMM9_149B7BD7218AAB4E257D28469FDDB0D\", \"VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E\", ] prueba = {\"correlativo\": None, \"nameExec\":",
"l = entry.dll print('Llamadas a funciones:') entrysList.append(str(l.decode('utf-8'))) if str(entry.dll) == \"b'KERNEL32.DLL'\": for function",
"[] WININET = [] ntdll = [] # print(execs.index(a) + 1) print(\"a\") print(a)",
"entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print (entry.dll) l = entry.dll print('Llamadas a funciones:')",
"print('\\t', x.decode('utf-8')) KERNEL32.append(x.decode('utf-8')) prueba[\"KERNEL32\"] = KERNEL32 elif str(entry.dll) == \"b'NETAPI32.dll'\": for function in",
"x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba[\"kernel32\"] = kernel32 elif str(entry.dll) == \"b'ADVAPI32.dll'\": for function in entry.imports:",
"print() print(\"TimeStamp\") print(\"TimeDateStamp : \" + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1] print(z) prueba[\"TimeStamp\"] =",
"in entry.imports: x = function.name print('\\t', x.decode('utf-8')) ntdll.append(x.decode('utf-8')) prueba[\"ntdll\"] = ntdll elif str(entry.dll)",
"pefile.PE(\"65018CD542145A3792BA09985734C12A\") # algo = [10, 20, 30, 40, 50] granPrueba = [] entrysList",
"= sectionSR print() print() print(\"Entradas\") for entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print (entry.dll)",
"[], \"KERNEL32\": [], \"NETAPI32\": [], \"PSAPI\": [], \"WININET\": [], \"ntdll\": [], \"TimeStamp\": None}",
"== \"b'ADVAPI32.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) ADVAPI32.append(x.decode('utf-8')) prueba[\"ADVAPI32\"]",
"\"GFT4_7DDD3D72EAD03C7518F5D47650C8572\", \"<KEY>\", \"<KEY>\", \"JKK8CA6FE7A1315AF5AFEAC2961460A80569\", \"<KEY>\", \"<KEY>\", \"L11_1415EB8519D13328091CC5C76A624E3D\", \"NBV_8B75BCBFF174C25A0161F30758509A44\", \"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4\", \"PL98_BD8B082B7711BC980252F988BB0CA936\", \"POL55_A4F1ECC4D25B33395196B5D51A06790\", \"QW2_4C6BDDCCA2695D6202DF38708E14FC7E\", \"RTC_7F85D7F628CE62D1D8F7B39D8940472\",",
"prueba[\"GDI32\"] = GDI32 elif str(entry.dll) == \"b'KERNEL32.dll'\": for function in entry.imports: x =",
"KERNEL32.append(x.decode('utf-8')) prueba[\"KERNEL32\"] = KERNEL32 elif str(entry.dll) == \"b'NETAPI32.dll'\": for function in entry.imports: x",
"pe = pefile.PE(a) prueba[\"correlativo\"] = c prueba[\"nameExec\"] = a print(c) print(\"Secciones\") for section",
"print(\"TimeDateStamp : \" + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1] print(z) prueba[\"TimeStamp\"] = z print(c)",
"function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) shell32.append(x.decode('utf-8')) prueba[\"shell32\"] = shell32 elif",
"prueba[\"sectionName\"] = sectionNames prueba[\"sectionVA\"] = sectionVA prueba[\"sectionVS\"] = sectionVS prueba[\"sectionSR\"] = sectionSR print()",
"in entry.imports: x = function.name print('\\t', x.decode('utf-8')) shell32.append(x.decode('utf-8')) prueba[\"shell32\"] = shell32 elif str(entry.dll)",
"\"ws232\": None, \"TimeStamp\": None} # print(granPrueba) import pandas as pd df = pd.DataFrame(granPrueba)",
"+ 1) print(\"a\") print(a) c = execs.index(a) + 1 pe = pefile.PE(a) prueba[\"correlativo\"]",
"\"sectionName\": [], \"sectionVA\": [], \"sectionVS\": [], \"sectionSR\": [], \"kernel32\": [], \"msvcrt\": [], \"shell32\":",
"\"ADVAPI32\": [], \"GDI32\": [], \"KERNEL32\": [], \"NETAPI32\": [], \"PSAPI\": [], \"WININET\": [], \"ntdll\":",
"entry.imports: x = function.name print('\\t', x.decode('utf-8')) user32.append(x.decode('utf-8')) prueba[\"user32\"] = user32 elif str(entry.dll) ==",
"\"B98hX8E8622C393D7E832D39E620EAD5D3B49\", \"BVJ2D9FBF759F527AF373E34673DC3ACA462\", \"DS22_A670D13D4D014169C4080328B8FEB86\", \"EEE99EC8AA67B05407C01094184C33D2B5A44\", \"F6655E39465C2FF5B016980D918EA028\", \"F8437E44748D2C3FCF84019766F4E6DC\", \"<KEY>\", \"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690\", \"<KEY>\", \"FTTR9EA3C16194CE354C244C1B74C46CD92E\", \"<KEY>\", \"GFT4_7DDD3D72EAD03C7518F5D47650C8572\", \"<KEY>\",",
"a funciones:') entrysList.append(str(l.decode('utf-8'))) if str(entry.dll) == \"b'KERNEL32.DLL'\": for function in entry.imports: x =",
"\"JKK8CA6FE7A1315AF5AFEAC2961460A80569\", \"<KEY>\", \"<KEY>\", \"L11_1415EB8519D13328091CC5C76A624E3D\", \"NBV_8B75BCBFF174C25A0161F30758509A44\", \"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4\", \"PL98_BD8B082B7711BC980252F988BB0CA936\", \"POL55_A4F1ECC4D25B33395196B5D51A06790\", \"QW2_4C6BDDCCA2695D6202DF38708E14FC7E\", \"RTC_7F85D7F628CE62D1D8F7B39D8940472\", \"SAM_B659D71AE168E774FAAF38DB30F4A84\", \"TG78Z__727A6800991EEAD454E53E8AF164A99C\", \"VBMM9_149B7BD7218AAB4E257D28469FDDB0D\",",
"\"SAM_B659D71AE168E774FAAF38DB30F4A84\", \"TG78Z__727A6800991EEAD454E53E8AF164A99C\", \"VBMM9_149B7BD7218AAB4E257D28469FDDB0D\", \"VC990_468FF2C12CFFC7E5B2FE0EE6BB3B239E\", ] prueba = {\"correlativo\": None, \"nameExec\": None, \"sectionName\": [],",
"print(\"Entradas\") for entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print (entry.dll) l = entry.dll print('Llamadas",
"print('\\t', x.decode('utf-8')) NETAPI32.append(x.decode('utf-8')) prueba[\"NETAPI32\"] = NETAPI32 elif str(entry.dll) == \"b'PSAPI.DLL'\": for function in",
"= [] ws232 = [] ADVAPI32 = [] GDI32 = [] KERNEL32 =",
"x = function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] = WININET elif str(entry.dll) == \"b'ntdll.dll'\":",
"\"BVJ2D9FBF759F527AF373E34673DC3ACA462\", \"DS22_A670D13D4D014169C4080328B8FEB86\", \"EEE99EC8AA67B05407C01094184C33D2B5A44\", \"F6655E39465C2FF5B016980D918EA028\", \"F8437E44748D2C3FCF84019766F4E6DC\", \"<KEY>\", \"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690\", \"<KEY>\", \"FTTR9EA3C16194CE354C244C1B74C46CD92E\", \"<KEY>\", \"GFT4_7DDD3D72EAD03C7518F5D47650C8572\", \"<KEY>\", \"<KEY>\",",
"entry.imports: x = function.name print('\\t', x.decode('utf-8')) PSAPI.append(x.decode('utf-8')) prueba[\"PSAPI\"] = PSAPI elif str(entry.dll) ==",
"\"<KEY>\", \"<KEY>\", \"A316D5AECA269CA865077E7FFF356E7D\", \"<KEY>\", \"AL65_DB05DF0498B59B42A8E493CF3C10C578\", \"B07322743778B5868475DBE66EEDAC4F\", \"B98hX8E8622C393D7E832D39E620EAD5D3B49\", \"BVJ2D9FBF759F527AF373E34673DC3ACA462\", \"DS22_A670D13D4D014169C4080328B8FEB86\", \"EEE99EC8AA67B05407C01094184C33D2B5A44\", \"F6655E39465C2FF5B016980D918EA028\", \"F8437E44748D2C3FCF84019766F4E6DC\", \"<KEY>\",",
"[], \"sectionVA\": [], \"sectionVS\": [], \"sectionSR\": None, \"kernel32\": None, \"msvcrt\": None, \"shell32\": None,",
"\"F6655E39465C2FF5B016980D918EA028\", \"F8437E44748D2C3FCF84019766F4E6DC\", \"<KEY>\", \"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690\", \"<KEY>\", \"FTTR9EA3C16194CE354C244C1B74C46CD92E\", \"<KEY>\", \"GFT4_7DDD3D72EAD03C7518F5D47650C8572\", \"<KEY>\", \"<KEY>\", \"JKK8CA6FE7A1315AF5AFEAC2961460A80569\", \"<KEY>\", \"<KEY>\",",
"{\"correlativo\": None, \"nameExec\": None, \"sectionName\": [], \"sectionVA\": [], \"sectionVS\": [], \"sectionSR\": None, \"kernel32\":",
"\"sectionName\": [], \"sectionVA\": [], \"sectionVS\": [], \"sectionSR\": None, \"kernel32\": None, \"msvcrt\": None, \"shell32\":",
"for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) user32.append(x.decode('utf-8')) prueba[\"user32\"] = user32",
"print(c) print(\"Secciones\") for section in pe.sections: print(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), section.SizeOfRawData) b = section.Name",
"print() print() print(\"Entradas\") for entry in pe.DIRECTORY_ENTRY_IMPORT: print('Llamadas DLL:') print (entry.dll) l =",
"as pd df = pd.DataFrame(granPrueba) print(df) # print(entrysList) def unique(list1): x = np.array(list1)",
"# import os execs = [ \"1F2EB7B090018D975E6D9B40868C94CA\", \"33DE5067A433A6EC5C328067DC18EC37\", \"65018CD542145A3792BA09985734C12A\", \"<KEY>\", \"<KEY>\", \"<KEY>\", \"<KEY>\",",
"entrysList = [] for a in execs: sectionNames = [] sectionVA = []",
"= [] sectionVA = [] sectionVS = [] sectionSR = [] kernel32 =",
"in entry.imports: x = function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] = WININET elif str(entry.dll)",
"prueba[\"kernel32\"] = kernel32 elif str(entry.dll) == \"b'ADVAPI32.dll'\": for function in entry.imports: x =",
"[] # print(execs.index(a) + 1) print(\"a\") print(a) c = execs.index(a) + 1 pe",
"pe.sections: print(section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), section.SizeOfRawData) b = section.Name sectionNames.append(b.decode('utf-8')) sectionVA.append(section.VirtualAddress) sectionVS.append(section.Misc_VirtualSize) sectionSR.append(section.SizeOfRawData) prueba[\"sectionName\"]",
"\"<KEY>\", \"FTTR9EA3C16194CE354C244C1B74C46CD92E\", \"<KEY>\", \"GFT4_7DDD3D72EAD03C7518F5D47650C8572\", \"<KEY>\", \"<KEY>\", \"JKK8CA6FE7A1315AF5AFEAC2961460A80569\", \"<KEY>\", \"<KEY>\", \"L11_1415EB8519D13328091CC5C76A624E3D\", \"NBV_8B75BCBFF174C25A0161F30758509A44\", \"NV99_C9C9DBF388A8D81D8CFB4D3FC05F8E4\", \"PL98_BD8B082B7711BC980252F988BB0CA936\",",
"\"kernel32\": [], \"msvcrt\": [], \"shell32\": [], \"user32\": [], \"ws232\": [], \"ADVAPI32\": [], \"GDI32\":",
"+ 1 pe = pefile.PE(a) prueba[\"correlativo\"] = c prueba[\"nameExec\"] = a print(c) print(\"Secciones\")",
"= [] NETAPI32 = [] PSAPI = [] WININET = [] ntdll =",
"as np # import os execs = [ \"1F2EB7B090018D975E6D9B40868C94CA\", \"33DE5067A433A6EC5C328067DC18EC37\", \"65018CD542145A3792BA09985734C12A\", \"<KEY>\", \"<KEY>\",",
"function.name print('\\t', x.decode('utf-8')) ws232.append(x.decode('utf-8')) prueba[\"ws232\"] = ws232 # listamalware = os.listdir(path) print() print()",
"[], \"sectionVS\": [], \"sectionSR\": None, \"kernel32\": None, \"msvcrt\": None, \"shell32\": None, \"user32\": None,",
"str(entry.dll) == \"b'KERNEL32.DLL'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8'))",
"[], \"user32\": [], \"ws232\": [], \"ADVAPI32\": [], \"GDI32\": [], \"KERNEL32\": [], \"NETAPI32\": [],",
"\"<KEY>\", \"A316D5AECA269CA865077E7FFF356E7D\", \"<KEY>\", \"AL65_DB05DF0498B59B42A8E493CF3C10C578\", \"B07322743778B5868475DBE66EEDAC4F\", \"B98hX8E8622C393D7E832D39E620EAD5D3B49\", \"BVJ2D9FBF759F527AF373E34673DC3ACA462\", \"DS22_A670D13D4D014169C4080328B8FEB86\", \"EEE99EC8AA67B05407C01094184C33D2B5A44\", \"F6655E39465C2FF5B016980D918EA028\", \"F8437E44748D2C3FCF84019766F4E6DC\", \"<KEY>\", \"FGTR43_EF8E0FB20E7228C7492CCDC59D87C690\",",
"30, 40, 50] granPrueba = [] entrysList = [] for a in execs:",
"sectionVA = [] sectionVS = [] sectionSR = [] kernel32 = [] msvcrt",
"KERNEL32 = [] NETAPI32 = [] PSAPI = [] WININET = [] ntdll",
"print(granPrueba) import pandas as pd df = pd.DataFrame(granPrueba) print(df) # print(entrysList) def unique(list1):",
"entry.imports: x = function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] = WININET elif str(entry.dll) ==",
": \" + pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1]) z = pe.FILE_HEADER.dump_dict()['TimeDateStamp']['Value'].split('[')[1][:-1] print(z) prueba[\"TimeStamp\"] = z print(c) #",
"prueba[\"msvcrt\"] = msvcrt elif str(entry.dll) == \"b'SHELL32.dll'\": for function in entry.imports: x =",
"= entry.dll print('Llamadas a funciones:') entrysList.append(str(l.decode('utf-8'))) if str(entry.dll) == \"b'KERNEL32.DLL'\": for function in",
"== \"b'KERNEL32.DLL'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) kernel32.append(x.decode('utf-8')) prueba[\"kernel32\"]",
"for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) ws232.append(x.decode('utf-8')) prueba[\"ws232\"] = ws232",
"ws232 = [] ADVAPI32 = [] GDI32 = [] KERNEL32 = [] NETAPI32",
"pefile.PE(a) prueba[\"correlativo\"] = c prueba[\"nameExec\"] = a print(c) print(\"Secciones\") for section in pe.sections:",
"print(a) c = execs.index(a) + 1 pe = pefile.PE(a) prueba[\"correlativo\"] = c prueba[\"nameExec\"]",
"print('\\t', x.decode('utf-8')) GDI32.append(x.decode('utf-8')) prueba[\"GDI32\"] = GDI32 elif str(entry.dll) == \"b'KERNEL32.dll'\": for function in",
"None, \"user32\": None, \"ws232\": None, \"TimeStamp\": None} # print(granPrueba) import pandas as pd",
"elif str(entry.dll) == \"b'SHELL32.dll'\": for function in entry.imports: x = function.name print('\\t', x.decode('utf-8'))",
"\"GDI32\": [], \"KERNEL32\": [], \"NETAPI32\": [], \"PSAPI\": [], \"WININET\": [], \"ntdll\": [], \"TimeStamp\":",
"for function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) PSAPI.append(x.decode('utf-8')) prueba[\"PSAPI\"] = PSAPI",
"function in entry.imports: x = function.name print('\\t', x.decode('utf-8')) WININET.append(x.decode('utf-8')) prueba[\"WININET\"] = WININET elif"
] |
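# A minimal defensive sketch (not part of the original script): pefile leaves
# function.name as None for ordinal-only imports, so the x.decode('utf-8')
# calls above can raise AttributeError on some binaries. The helper name
# import_name is hypothetical.
def import_name(function):
    """Return a printable name for a pefile import entry."""
    if function.name is not None:
        return function.name.decode('utf-8', errors='replace')
    return 'ordinal_%d' % function.ordinal  # fall back to the import ordinal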
# Crystal-growth simulation entry point, reconstructed from the shards above.
# -*- coding: utf-8 -*-
'''
The main script to run, enables profiling.
'''
import numpy as np
import math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from crystal import *
from plot_init_tools import *


def main(num_of_growths):
    '''
    Main method to start simulation. Uncomment specific simulation or
    write a new one.
    '''
    # Main simulation crystal, x is dimensions (m=n=x)
    # Stairs
    x, init = make_init("stairs", 200)
    c = Crystal(x, x, initial_grid=init.copy(), mode="step", hist_int=int(num_of_growths/4), \
                border_policy="loop", use_height=False)
    c.grow(num_of_growths)
    plot_crystal(c)

    # # Step
    # x, init = make_init("step", 200)
    # c = Crystal(x, x, initial_grid=init.copy(), mode="step", hist_int=int(num_of_growths/4), \
    #             border_policy="loop")
    # #c.print_grid()
    # c.grow(num_of_growths)
    # #c.print_grid()
    # plot_crystal(c)

    # # Screw
    # x, init = make_init("screw", 200)
    # c = Crystal(x, x, initial_grid=init.copy(), mode="spin", hist_int=int(num_of_growths/8), \
    #             border_policy="flex")
    # #c.print_grid()
    # c.grow(num_of_growths)
    # #c.print_grid()
    # plot_crystal(c)

    # # A crystal object serving to visualize only "what grew" without init state
    # d = Crystal(x, x, initial_grid=(c.grid-init))
    # plot_crystal(d, 2)

    # Show history of simulation
    plot_history(c)

    # # Generate a publishable plot
    # plot_out(c)


def profile():
    '''
    Function used to profile code for speedups.
    '''
    import cProfile
    cProfile.run('main(50)', 'pstats')
    from pstats import Stats
    p = Stats('pstats')
    p.strip_dirs().sort_stats('time').print_stats(10)


main(50)
#profile()
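# An optional refinement sketch (assumption: this file is the executable entry
# point): guarding the module-level call keeps importing this script
# side-effect free while preserving the behavior above.
if __name__ == "__main__":
    main(50)
    # profile()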
[
"TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False, install_requires=[ \"Flask>=1.1.2,<1.2.0\", \"numpy>=1.18.4,<1.19.0\",",
"setup with open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep App for",
"<reponame>TomaLaPlazaConCabeza/web-app from setuptools import find_packages, setup with open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read()",
"name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\",",
"handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\",",
"description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False,",
"handle.read() setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\",",
"for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False, install_requires=[ \"Flask>=1.1.2,<1.2.0\",",
"from setuptools import find_packages, setup with open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read() setup(",
"setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(),",
"= handle.read() setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\",",
"author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False, install_requires=[ \"Flask>=1.1.2,<1.2.0\", \"numpy>=1.18.4,<1.19.0\", \"Shapely>=1.7.0,<1.8.0\", \"descartes>=1.1.0,<1.2.0\", ], )",
"import find_packages, setup with open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep",
"setuptools import find_packages, setup with open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\",",
"as handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\",",
"author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False, install_requires=[ \"Flask>=1.1.2,<1.2.0\", \"numpy>=1.18.4,<1.19.0\", \"Shapely>=1.7.0,<1.8.0\", \"descartes>=1.1.0,<1.2.0\", ],",
"open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION,",
"App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False, install_requires=[",
"version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False, install_requires=[ \"Flask>=1.1.2,<1.2.0\", \"numpy>=1.18.4,<1.19.0\", \"Shapely>=1.7.0,<1.8.0\", \"descartes>=1.1.0,<1.2.0\",",
"LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\", long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\",",
"long_description=LONG_DESCRIPTION, version=\"0.1.0-dev\", author=\"TomaLaPlazaConCabeza\", author_email=\"<EMAIL>\", url=\"https://github.com/TomaLaPlazaConCabeza/web-app\", license=\"BSD-3-clause\", packages=find_packages(), python_requires=\">=3.8\", zip_safe=False, install_requires=[ \"Flask>=1.1.2,<1.2.0\", \"numpy>=1.18.4,<1.19.0\", \"Shapely>=1.7.0,<1.8.0\",",
"find_packages, setup with open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep App",
"with open(\"README.md\") as handle: LONG_DESCRIPTION = handle.read() setup( name=\"web_app\", description=\"Wep App for TomaLaPlazaConCabeza\","
]
[
"size of the board. \"\"\" pass @abstractmethod def unpack(self, force=False) -> NoReturn: \"\"\"",
"self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self, size=19): self.size = size def __len__(self): return",
"listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for key in self.keys(): yield self[key] def items(self)",
"exist_ok=True) def get_cache_dir() -> str: return cache_dir def get_archive_dir() -> str: return archive_folder",
"{_cls.name: _cls for _cls in cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict",
"abc import ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game import numpy as np from",
"def __setitem__(self, name: str, data: GameData): data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size),",
"def get_game_dir() -> str: return game_folder def get_array_dir() -> str: return array_folder class",
"Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size() winner =",
"ArrayDatabase: def __init__(self, method: str, size=19): self.size = size self.method = method makedirs(self.root(),",
"path.join(self.root(), key) with open(file, \"rb\") as f: return pickle.load(f) def __setitem__(self, key: str,",
"dest = self.path(name) with open(dest, \"wb\") as f: pickle.dump(self, f) def root(self): return",
"\"rb\") as f: return pickle.load(f) def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]): file",
"len(self.keys()) def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]: file = path.join(self.root(), key) with",
"pickle.dump(value, f) def __delitem__(self, key: str): file = path.join(self.root(), key) remove(file) def __contains__(self,",
"str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def",
"force=False) -> NoReturn: \"\"\" Retrieve all archives available from Internet. :param force: whether",
"def values(self) -> Iterable[np.ndarray]: for key in self.keys(): yield self[key] def items(self) ->",
"key in self.keys(): yield key, self[key] class ArrayDatabase: def __init__(self, method: str, size=19):",
"in self.keys(): yield key, self[key] class ArrayDatabase: def __init__(self, method: str, size=19): self.size",
"as f: pickle.dump(self, f) def root(self): return path.join(get_game_dir(), str(self.size)) def path(self, name: str):",
"of the board. \"\"\" pass @abstractmethod def unpack(self, force=False) -> NoReturn: \"\"\" Unpack",
"return cls(size, winner, sequence, komi, setup_stones) @staticmethod def from_pickle(name: str, size: Union[int, str]",
"_cls in cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self,",
"def __len__(self): return len(self.keys()) def __getitem__(self, name: str) -> GameData: return GameData.from_pickle(name, self.size)",
"open(path.join(get_game_dir(), str(size), name), \"rb\") as f: return pickle.load(f) @staticmethod def pickle_exists(name: str, size:",
"exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str: return cache_dir def get_archive_dir()",
"def values(self) -> Iterable[GameData]: for key in self.keys(): yield self[key] def items(self) ->",
"self.size == other.size return NotImplemented def root(self): return path.join(get_game_dir(), str(self.size)) def keys(self) ->",
"str(size), name), \"rb\") as f: return pickle.load(f) @staticmethod def pickle_exists(name: str, size: Union[int,",
"\"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir archive_folder =",
"str, data: GameData): data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self,",
"key)) def root(self): return path.join(get_array_dir(), str(self.size), self.method) def keys(self) -> List[str]: return listdir(self.root())",
"def retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve all archives available from Internet. :param",
"size=19): self.size = size self.method = method makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys())",
"from abc import ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game import numpy as np",
"self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self, size=19): self.size = size def __len__(self):",
"pickle from abc import ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game import numpy as",
"__setitem__(self, key: str, value: Tuple[np.ndarray, ...]): file = path.join(self.root(), key) with open(file, \"wb\")",
"name), \"rb\") as f: return pickle.load(f) @staticmethod def pickle_exists(name: str, size: Union[int, str]",
"archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") def",
"file = path.join(self.root(), key) remove(file) def __contains__(self, key: str): return path.exists(path.join(self.root(), key)) def",
"def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name: str): return path.exists(path.join(get_game_dir(),",
"Iterable[Tuple[str, GameData]]: for key in self.keys(): yield key, self[key] class ArrayDatabase: def __init__(self,",
"sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi, setup_stones) @staticmethod def from_pickle(name: str, size: Union[int,",
"sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game:",
"= 19): return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest",
"with open(file, \"rb\") as f: return pickle.load(f) def __setitem__(self, key: str, value: Tuple[np.ndarray,",
"np from .go_types import * __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\",",
"all game archives to Game Cache Folder, every single file should end with",
"winner, sequence, komi, setup_stones) @staticmethod def from_pickle(name: str, size: Union[int, str] = 19):",
"exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str: return cache_dir def get_archive_dir() -> str:",
"coding: utf-8 -*- from os import path, getcwd, makedirs, listdir, remove from typing",
"return array_folder class GameData(NamedTuple): size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float",
"path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(),",
"size=19): self.size = size def __len__(self): return len(self.keys()) def __getitem__(self, name: str) ->",
"value: Tuple[np.ndarray, ...]): file = path.join(self.root(), key) with open(file, \"wb\") as f: pickle.dump(value,",
"-> NoReturn: \"\"\" Extract all game archives to Game Cache Folder, every single",
"[\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\",",
"19): with open(path.join(get_game_dir(), str(size), name), \"rb\") as f: return pickle.load(f) @staticmethod def pickle_exists(name:",
"name = \"none\" @classmethod def archive_map(cls): _dict = {_cls.name: _cls for _cls in",
"whether forces to download archive if it has already existed \"\"\" pass @abstractmethod",
"name) class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def archive_map(cls): _dict = {_cls.name: _cls",
"return archive_folder def get_game_dir() -> str: return game_folder def get_array_dir() -> str: return",
"= path.join(self.root(), key) with open(file, \"rb\") as f: return pickle.load(f) def __setitem__(self, key:",
"\"\"\" Retrieve all archives available from Internet. :param force: whether forces to download",
"game archives to :param force: whether forces to download archive if it has",
"the board. \"\"\" pass @abstractmethod def unpack(self, force=False) -> NoReturn: \"\"\" Unpack all",
"sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi, setup_stones) @staticmethod def from_pickle(name:",
"open(dest, \"wb\") as f: pickle.dump(self, f) def root(self): return path.join(get_game_dir(), str(self.size)) def path(self,",
"method makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys()) def __getitem__(self, key: str) -> Tuple[np.ndarray,",
"pickle_exists(name: str, size: Union[int, str] = 19): return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self,",
"Union[int, str] = 19): return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name: str): makedirs(self.root(),",
"every single file should end with `.game.pkl` and be start with it's size",
"__delitem__(self, key: str): file = path.join(self.root(), key) remove(file) def __contains__(self, key: str): return",
"for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]: for key",
"-> GameData: return GameData.from_pickle(name, self.size) def __setitem__(self, name: str, data: GameData): data.to_pickle(name) def",
"keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[GameData]: for key in self.keys():",
"makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str: return",
"array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] = None) -> NoReturn: global cache_dir,",
"str(size), name)) def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest = self.path(name) with open(dest,",
"def __getitem__(self, name: str) -> GameData: return GameData.from_pickle(name, self.size) def __setitem__(self, name: str,",
"str(self.size), name)) def __contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other):",
"str(self.size), self.method) def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for",
"str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def archive_map(cls): _dict",
"str(self.size)) def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[GameData]: for key",
"as f: pickle.dump(value, f) def __delitem__(self, key: str): file = path.join(self.root(), key) remove(file)",
"return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other): if isinstance(other, GameDatabase): return self.size ==",
"key) with open(file, \"wb\") as f: pickle.dump(value, f) def __delitem__(self, key: str): file",
"global cache_dir, archive_folder, game_folder, array_folder if directory is None: directory = default_cache_dir cache_dir",
"= {_cls.name: _cls for _cls in cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map()) return",
"size: Union[int, str] = 19): with open(path.join(get_game_dir(), str(size), name), \"rb\") as f: return",
"@staticmethod def pickle_exists(name: str, size: Union[int, str] = 19): return path.exists(path.join(get_game_dir(), str(size), name))",
"-> Tuple[np.ndarray, ...]: file = path.join(self.root(), key) with open(file, \"rb\") as f: return",
"retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve all archives available from Internet. :param force:",
"all archives available from Internet. :param force: whether forces to download archive if",
"return pickle.load(f) def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]): file = path.join(self.root(), key)",
"None: directory = default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder",
"directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\")",
"return self.size == other.size return NotImplemented def root(self): return path.join(get_game_dir(), str(self.size)) def keys(self)",
"= [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)),",
"already existed \"\"\" pass @abstractmethod def extract(self, force=False) -> NoReturn: \"\"\" Extract all",
"name: str): makedirs(self.root(), exist_ok=True) dest = self.path(name) with open(dest, \"wb\") as f: pickle.dump(self,",
"force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self, size=19): self.size = size def",
"Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence",
"get_cache_dir() -> str: return cache_dir def get_archive_dir() -> str: return archive_folder def get_game_dir()",
"\"\"\" Extract all game archives to Game Cache Folder, every single file should",
"remove(file) def __contains__(self, key: str): return path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(), str(self.size),",
"setup_stones) @staticmethod def from_pickle(name: str, size: Union[int, str] = 19): with open(path.join(get_game_dir(), str(size),",
"makedirs(self.root(), exist_ok=True) dest = self.path(name) with open(dest, \"wb\") as f: pickle.dump(self, f) def",
"= sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi, setup_stones) @staticmethod def",
"\".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True)",
"with open(dest, \"wb\") as f: pickle.dump(self, f) def root(self): return path.join(get_game_dir(), str(self.size)) def",
"with open(path.join(get_game_dir(), str(size), name), \"rb\") as f: return pickle.load(f) @staticmethod def pickle_exists(name: str,",
"array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def",
"def root(self): return path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]: return listdir(self.root()) def values(self)",
"self.method = method makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys()) def __getitem__(self, key: str)",
"exist_ok=True) dest = self.path(name) with open(dest, \"wb\") as f: pickle.dump(self, f) def root(self):",
"end with `.game.pkl` and be start with it's size of the board. \"\"\"",
"Tuple[np.ndarray, ...]): file = path.join(self.root(), key) with open(file, \"wb\") as f: pickle.dump(value, f)",
"NoReturn: \"\"\" Unpack all game archives to :param force: whether forces to download",
"if directory is None: directory = default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder =",
"default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\")",
"v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self, force=False) -> NoReturn: \"\"\"",
"def __contains__(self, key: str): return path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(), str(self.size), self.method)",
"str: return array_folder class GameData(NamedTuple): size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi:",
"__init__(self, method: str, size=19): self.size = size self.method = method makedirs(self.root(), exist_ok=True) def",
"@staticmethod def from_pickle(name: str, size: Union[int, str] = 19): with open(path.join(get_game_dir(), str(size), name),",
"return path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[GameData]:",
"utf-8 -*- from os import path, getcwd, makedirs, listdir, remove from typing import",
"size self.method = method makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys()) def __getitem__(self, key:",
"NoReturn: \"\"\" Extract all game archives to Game Cache Folder, every single file",
"self.extract(force=force) class GameDatabase: def __init__(self, size=19): self.size = size def __len__(self): return len(self.keys())",
"def __init__(self, size=19): self.size = size def __len__(self): return len(self.keys()) def __getitem__(self, name:",
"-> str: return archive_folder def get_game_dir() -> str: return game_folder def get_array_dir() ->",
"makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str: return cache_dir def get_archive_dir() ->",
"\"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir archive_folder",
"@classmethod def from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence =",
"\"none\" @classmethod def archive_map(cls): _dict = {_cls.name: _cls for _cls in cls.__subclasses__()} for",
"values(self) -> Iterable[np.ndarray]: for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str,",
"for key in self.keys(): yield key, self[key] class ArrayDatabase: def __init__(self, method: str,",
"NoReturn: \"\"\" Retrieve all archives available from Internet. :param force: whether forces to",
"komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game): size =",
"f: pickle.dump(self, f) def root(self): return path.join(get_game_dir(), str(self.size)) def path(self, name: str): return",
"`.game.pkl` and be start with it's size of the board. \"\"\" pass @abstractmethod",
"self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]: for key in self.keys(): yield",
"method: str, size=19): self.size = size self.method = method makedirs(self.root(), exist_ok=True) def __len__(self):",
"def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[GameData]: for key in",
"...]: file = path.join(self.root(), key) with open(file, \"rb\") as f: return pickle.load(f) def",
"if it has already existed \"\"\" pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force)",
"cache_dir, archive_folder, game_folder, array_folder if directory is None: directory = default_cache_dir cache_dir =",
"return len(self.keys()) def __getitem__(self, name: str) -> GameData: return GameData.from_pickle(name, self.size) def __setitem__(self,",
"as np from .go_types import * __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\",",
"GameDatabase: def __init__(self, size=19): self.size = size def __len__(self): return len(self.keys()) def __getitem__(self,",
"str: return cache_dir def get_archive_dir() -> str: return archive_folder def get_game_dir() -> str:",
"path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str]",
"str] = 19): with open(path.join(get_game_dir(), str(size), name), \"rb\") as f: return pickle.load(f) @staticmethod",
"GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in sgf_game.get_main_sequence()))) komi",
"\".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str:",
"@classmethod def archive_map(cls): _dict = {_cls.name: _cls for _cls in cls.__subclasses__()} for v",
"GameData(NamedTuple): size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]],",
"\"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir archive_folder = path.join(cache_dir, \".kgs\")",
"-> str: return game_folder def get_array_dir() -> str: return array_folder class GameData(NamedTuple): size:",
"Sgf_game import numpy as np from .go_types import * __all__ = [\"set_cache_dir\", \"get_cache_dir\",",
"file should end with `.game.pkl` and be start with it's size of the",
"remove from typing import * import pickle from abc import ABCMeta, abstractmethod from",
"it has already existed \"\"\" pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class",
"key) with open(file, \"rb\") as f: return pickle.load(f) def __setitem__(self, key: str, value:",
"def unpack(self, force=False) -> NoReturn: \"\"\" Unpack all game archives to :param force:",
"yield self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]: for key in self.keys(): yield key,",
"makedirs, listdir, remove from typing import * import pickle from abc import ABCMeta,",
"self.method) def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for key",
"values(self) -> Iterable[GameData]: for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str,",
"root(self): return path.join(get_game_dir(), str(self.size)) def path(self, name: str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta):",
"existed \"\"\" pass @abstractmethod def extract(self, force=False) -> NoReturn: \"\"\" Extract all game",
"Iterable[GameData]: for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, GameData]]: for",
"forces to download archive if it has already existed \"\"\" pass @abstractmethod def",
"-> str: return cache_dir def get_archive_dir() -> str: return archive_folder def get_game_dir() ->",
"from_pickle(name: str, size: Union[int, str] = 19): with open(path.join(get_game_dir(), str(size), name), \"rb\") as",
"size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move()",
"key: str): file = path.join(self.root(), key) remove(file) def __contains__(self, key: str): return path.exists(path.join(self.root(),",
"path.join(get_array_dir(), str(self.size), self.method) def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[np.ndarray]:",
"(node.get_move() for node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size,",
"Extract all game archives to Game Cache Folder, every single file should end",
"self.keys(): yield key, self[key] class ArrayDatabase: def __init__(self, method: str, size=19): self.size =",
"_dict = {_cls.name: _cls for _cls in cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map())",
"__getitem__(self, key: str) -> Tuple[np.ndarray, ...]: file = path.join(self.root(), key) with open(file, \"rb\")",
"root(self): return path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]: return listdir(self.root()) def values(self) ->",
"\"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir",
"= GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in sgf_game.get_main_sequence())))",
"GameData]]: for key in self.keys(): yield key, self[key] class ArrayDatabase: def __init__(self, method:",
"pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self, size=19): self.size",
"return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest = self.path(name)",
"= 19): with open(path.join(get_game_dir(), str(size), name), \"rb\") as f: return pickle.load(f) @staticmethod def",
"and be start with it's size of the board. \"\"\" pass @abstractmethod def",
"= default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir,",
"def path(self, name: str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod",
"pickle.dump(self, f) def root(self): return path.join(get_game_dir(), str(self.size)) def path(self, name: str): return path.join(self.root(),",
"makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys()) def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]:",
"def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self, size=19): self.size =",
"board. \"\"\" pass @abstractmethod def unpack(self, force=False) -> NoReturn: \"\"\" Unpack all game",
"as f: return pickle.load(f) @staticmethod def pickle_exists(name: str, size: Union[int, str] = 19):",
"it has already existed \"\"\" pass @abstractmethod def extract(self, force=False) -> NoReturn: \"\"\"",
"__contains__(self, key: str): return path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(), str(self.size), self.method) def",
"\"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\")",
"game_folder, array_folder if directory is None: directory = default_cache_dir cache_dir = path.join(getcwd(), directory)",
"str) -> GameData: return GameData.from_pickle(name, self.size) def __setitem__(self, name: str, data: GameData): data.to_pickle(name)",
"open(file, \"rb\") as f: return pickle.load(f) def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]):",
"\"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir =",
"List[str]: return listdir(self.root()) def values(self) -> Iterable[GameData]: for key in self.keys(): yield self[key]",
"winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in",
"name: str) -> GameData: return GameData.from_pickle(name, self.size) def __setitem__(self, name: str, data: GameData):",
"str: return archive_folder def get_game_dir() -> str: return game_folder def get_array_dir() -> str:",
"ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game import numpy as np from .go_types import",
"GameData: return GameData.from_pickle(name, self.size) def __setitem__(self, name: str, data: GameData): data.to_pickle(name) def __delitem__(self,",
"listdir(self.root()) def values(self) -> Iterable[GameData]: for key in self.keys(): yield self[key] def items(self)",
"set_cache_dir(directory: Optional[str] = None) -> NoReturn: global cache_dir, archive_folder, game_folder, array_folder if directory",
"key, self[key] class ArrayDatabase: def __init__(self, method: str, size=19): self.size = size self.method",
"-> List[str]: return listdir(self.root()) def values(self) -> Iterable[GameData]: for key in self.keys(): yield",
"def __delitem__(self, key: str): file = path.join(self.root(), key) remove(file) def __contains__(self, key: str):",
"to Game Cache Folder, every single file should end with `.game.pkl` and be",
"exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str: return cache_dir",
"@abstractmethod def unpack(self, force=False) -> NoReturn: \"\"\" Unpack all game archives to :param",
"\".data\") cache_dir = default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder",
"node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence,",
"def extract(self, force=False) -> NoReturn: \"\"\" Extract all game archives to Game Cache",
"\".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] =",
"-> Iterable[np.ndarray]: for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]:",
"Iterable[np.ndarray]: for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]: for",
"\"wb\") as f: pickle.dump(self, f) def root(self): return path.join(get_game_dir(), str(self.size)) def path(self, name:",
"cache_dir def get_archive_dir() -> str: return archive_folder def get_game_dir() -> str: return game_folder",
"move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones =",
"return GameData.from_pickle(name, self.size) def __setitem__(self, name: str, data: GameData): data.to_pickle(name) def __delitem__(self, name:",
"path.join(get_game_dir(), str(self.size)) def path(self, name: str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name =",
"NoReturn: global cache_dir, archive_folder, game_folder, array_folder if directory is None: directory = default_cache_dir",
"class ArrayDatabase: def __init__(self, method: str, size=19): self.size = size self.method = method",
"pass @abstractmethod def unpack(self, force=False) -> NoReturn: \"\"\" Unpack all game archives to",
"abstractmethod from sgfmill.sgf import Sgf_game import numpy as np from .go_types import *",
"default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\")",
"List[str]: return listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for key in self.keys(): yield self[key]",
"if isinstance(other, GameDatabase): return self.size == other.size return NotImplemented def root(self): return path.join(get_game_dir(),",
"force: whether forces to download archive if it has already existed \"\"\" pass",
"from os import path, getcwd, makedirs, listdir, remove from typing import * import",
"float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size()",
"import * import pickle from abc import ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game",
"return cache_dir def get_archive_dir() -> str: return archive_folder def get_game_dir() -> str: return",
"def get_array_dir() -> str: return array_folder class GameData(NamedTuple): size: int winner: GoPlayer sequence:",
"for node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner,",
"self[key] class ArrayDatabase: def __init__(self, method: str, size=19): self.size = size self.method =",
"* __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir",
"str(self.size)) def path(self, name: str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name = \"none\"",
"game archives to Game Cache Folder, every single file should end with `.game.pkl`",
"sequence, komi, setup_stones) @staticmethod def from_pickle(name: str, size: Union[int, str] = 19): with",
"path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] = None) -> NoReturn: global cache_dir, archive_folder, game_folder,",
"it's size of the board. \"\"\" pass @abstractmethod def unpack(self, force=False) -> NoReturn:",
"= path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] = None) ->",
"sgf_game: Sgf_game): size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]),",
"keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for key in self.keys():",
"has already existed \"\"\" pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase:",
"str: return game_folder def get_array_dir() -> str: return array_folder class GameData(NamedTuple): size: int",
"str) -> Tuple[np.ndarray, ...]: file = path.join(self.root(), key) with open(file, \"rb\") as f:",
"single file should end with `.game.pkl` and be start with it's size of",
"with it's size of the board. \"\"\" pass @abstractmethod def unpack(self, force=False) ->",
"Unpack all game archives to :param force: whether forces to download archive if",
".go_types import * __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\",",
"komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi, setup_stones) @staticmethod",
"already existed \"\"\" pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def",
"self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]: for key in self.keys(): yield key, self[key]",
"path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(), str(self.size), self.method) def keys(self) -> List[str]: return",
"game_folder def get_array_dir() -> str: return array_folder class GameData(NamedTuple): size: int winner: GoPlayer",
"pickle.load(f) @staticmethod def pickle_exists(name: str, size: Union[int, str] = 19): return path.exists(path.join(get_game_dir(), str(size),",
"-> List[str]: return listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for key in self.keys(): yield",
"def pickle_exists(name: str, size: Union[int, str] = 19): return path.exists(path.join(get_game_dir(), str(size), name)) def",
"archives to Game Cache Folder, every single file should end with `.game.pkl` and",
"self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, GameData]]: for key in self.keys(): yield",
"key) remove(file) def __contains__(self, key: str): return path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(),",
"str, size=19): self.size = size self.method = method makedirs(self.root(), exist_ok=True) def __len__(self): return",
"f: return pickle.load(f) def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]): file = path.join(self.root(),",
"data: GameData): data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name:",
"path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other): if isinstance(other, GameDatabase): return self.size == other.size",
"size def __len__(self): return len(self.keys()) def __getitem__(self, name: str) -> GameData: return GameData.from_pickle(name,",
"komi, setup_stones) @staticmethod def from_pickle(name: str, size: Union[int, str] = 19): with open(path.join(get_game_dir(),",
"def get_archive_dir() -> str: return archive_folder def get_game_dir() -> str: return game_folder def",
"def __contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other): if isinstance(other,",
"typing import * import pickle from abc import ABCMeta, abstractmethod from sgfmill.sgf import",
"def root(self): return path.join(get_game_dir(), str(self.size)) def path(self, name: str): return path.join(self.root(), name) class",
"sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in sgf_game.get_main_sequence()))) komi =",
"directory = default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder =",
"self.size) def __setitem__(self, name: str, data: GameData): data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(),",
"name)) def __eq__(self, other): if isinstance(other, GameDatabase): return self.size == other.size return NotImplemented",
"= size self.method = method makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys()) def __getitem__(self,",
"archive_map(cls): _dict = {_cls.name: _cls for _cls in cls.__subclasses__()} for v in cls.__subclasses__():",
"path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir,",
"self.size = size def __len__(self): return len(self.keys()) def __getitem__(self, name: str) -> GameData:",
"winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def",
"= path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True)",
"f: return pickle.load(f) @staticmethod def pickle_exists(name: str, size: Union[int, str] = 19): return",
"directory is None: directory = default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder = path.join(cache_dir,",
"to :param force: whether forces to download archive if it has already existed",
"to download archive if it has already existed \"\"\" pass @abstractmethod def extract(self,",
"= size def __len__(self): return len(self.keys()) def __getitem__(self, name: str) -> GameData: return",
"Optional[str] = None) -> NoReturn: global cache_dir, archive_folder, game_folder, array_folder if directory is",
"forces to download archive if it has already existed \"\"\" pass def download(self,",
"game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] = None)",
"numpy as np from .go_types import * __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\",",
"force=False) -> NoReturn: \"\"\" Unpack all game archives to :param force: whether forces",
"def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for key in",
"str, size: Union[int, str] = 19): return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name:",
"Union[int, str] = 19): with open(path.join(get_game_dir(), str(size), name), \"rb\") as f: return pickle.load(f)",
"class GameDatabase: def __init__(self, size=19): self.size = size def __len__(self): return len(self.keys()) def",
"from sgfmill.sgf import Sgf_game import numpy as np from .go_types import * __all__",
"class GameData(NamedTuple): size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]],",
"cache_dir = default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder =",
"archive_folder def get_game_dir() -> str: return game_folder def get_array_dir() -> str: return array_folder",
"for _cls in cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def",
"return path.join(get_game_dir(), str(self.size)) def path(self, name: str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name",
"def archive_map(cls): _dict = {_cls.name: _cls for _cls in cls.__subclasses__()} for v in",
"str(self.size), name)) def __eq__(self, other): if isinstance(other, GameDatabase): return self.size == other.size return",
"__all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir =",
"array_folder if directory is None: directory = default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder",
"return _dict @abstractmethod def retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve all archives available",
"name: str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other): if isinstance(other, GameDatabase): return",
"return listdir(self.root()) def values(self) -> Iterable[GameData]: for key in self.keys(): yield self[key] def",
"_dict @abstractmethod def retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve all archives available from",
"== other.size return NotImplemented def root(self): return path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]:",
"get_game_dir() -> str: return game_folder def get_array_dir() -> str: return array_folder class GameData(NamedTuple):",
"to download archive if it has already existed \"\"\" pass def download(self, force=False):",
"open(file, \"wb\") as f: pickle.dump(value, f) def __delitem__(self, key: str): file = path.join(self.root(),",
"\"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir archive_folder = path.join(cache_dir,",
"str, size: Union[int, str] = 19): with open(path.join(get_game_dir(), str(size), name), \"rb\") as f:",
"@abstractmethod def retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve all archives available from Internet.",
"import ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game import numpy as np from .go_types",
"def get_cache_dir() -> str: return cache_dir def get_archive_dir() -> str: return archive_folder def",
"Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner())",
"@abstractmethod def extract(self, force=False) -> NoReturn: \"\"\" Extract all game archives to Game",
"force=False) -> NoReturn: \"\"\" Extract all game archives to Game Cache Folder, every",
"in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi,",
"GameData.from_pickle(name, self.size) def __setitem__(self, name: str, data: GameData): data.to_pickle(name) def __delitem__(self, name: str):",
"key: str): return path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(), str(self.size), self.method) def keys(self)",
"-> str: return array_folder class GameData(NamedTuple): size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]]",
"def set_cache_dir(directory: Optional[str] = None) -> NoReturn: global cache_dir, archive_folder, game_folder, array_folder if",
"\"\"\" pass @abstractmethod def extract(self, force=False) -> NoReturn: \"\"\" Extract all game archives",
"if it has already existed \"\"\" pass @abstractmethod def extract(self, force=False) -> NoReturn:",
"= path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] = None) -> NoReturn: global cache_dir, archive_folder,",
"path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir,",
"for v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self, force=False) -> NoReturn:",
"path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] = None) -> NoReturn:",
"unpack(self, force=False) -> NoReturn: \"\"\" Unpack all game archives to :param force: whether",
"def __len__(self): return len(self.keys()) def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]: file =",
"in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]: for key in self.keys():",
"as f: return pickle.load(f) def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]): file =",
"items(self) -> Iterable[Tuple[str, GameData]]: for key in self.keys(): yield key, self[key] class ArrayDatabase:",
"_cls for _cls in cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod",
"str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other): if isinstance(other, GameDatabase): return self.size",
"__eq__(self, other): if isinstance(other, GameDatabase): return self.size == other.size return NotImplemented def root(self):",
"path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]: return listdir(self.root()) def values(self) -> Iterable[GameData]: for",
"pass @abstractmethod def extract(self, force=False) -> NoReturn: \"\"\" Extract all game archives to",
"def root(self): return path.join(get_array_dir(), str(self.size), self.method) def keys(self) -> List[str]: return listdir(self.root()) def",
"__contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other): if isinstance(other, GameDatabase):",
"path.join(self.root(), key) remove(file) def __contains__(self, key: str): return path.exists(path.join(self.root(), key)) def root(self): return",
"in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, GameData]]: for key in self.keys():",
"= path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder =",
"-> NoReturn: \"\"\" Retrieve all archives available from Internet. :param force: whether forces",
"Retrieve all archives available from Internet. :param force: whether forces to download archive",
"str, value: Tuple[np.ndarray, ...]): file = path.join(self.root(), key) with open(file, \"wb\") as f:",
"return len(self.keys()) def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]: file = path.join(self.root(), key)",
"sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node",
"exist_ok=True) def __len__(self): return len(self.keys()) def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]: file",
"start with it's size of the board. \"\"\" pass @abstractmethod def unpack(self, force=False)",
"import Sgf_game import numpy as np from .go_types import * __all__ = [\"set_cache_dir\",",
"file = path.join(self.root(), key) with open(file, \"wb\") as f: pickle.dump(value, f) def __delitem__(self,",
"pickle.load(f) def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]): file = path.join(self.root(), key) with",
"getcwd, makedirs, listdir, remove from typing import * import pickle from abc import",
":param force: whether forces to download archive if it has already existed \"\"\"",
"__init__(self, size=19): self.size = size def __len__(self): return len(self.keys()) def __getitem__(self, name: str)",
"-*- from os import path, getcwd, makedirs, listdir, remove from typing import *",
"should end with `.game.pkl` and be start with it's size of the board.",
"__getitem__(self, name: str) -> GameData: return GameData.from_pickle(name, self.size) def __setitem__(self, name: str, data:",
"key: str, value: Tuple[np.ndarray, ...]): file = path.join(self.root(), key) with open(file, \"wb\") as",
"with open(file, \"wb\") as f: pickle.dump(value, f) def __delitem__(self, key: str): file =",
"archive if it has already existed \"\"\" pass @abstractmethod def extract(self, force=False) ->",
"archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(),",
"NotImplemented def root(self): return path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]: return listdir(self.root()) def",
"= list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi()",
"path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(),",
"path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() ->",
"= path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True)",
"get_array_dir() -> str: return array_folder class GameData(NamedTuple): size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer],",
"existed \"\"\" pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self,",
"-> NoReturn: global cache_dir, archive_folder, game_folder, array_folder if directory is None: directory =",
"__len__(self): return len(self.keys()) def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]: file = path.join(self.root(),",
"= path.join(self.root(), key) remove(file) def __contains__(self, key: str): return path.exists(path.join(self.root(), key)) def root(self):",
"\".game\") array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory: Optional[str] = None) -> NoReturn: global",
"= default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir,",
"* import pickle from abc import ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game import",
"__delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size),",
"from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move:",
"def from_pickle(name: str, size: Union[int, str] = 19): with open(path.join(get_game_dir(), str(size), name), \"rb\")",
"-> Iterable[GameData]: for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, GameData]]:",
"def __init__(self, method: str, size=19): self.size = size self.method = method makedirs(self.root(), exist_ok=True)",
"for key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, GameData]]: for key",
"in cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self, force=False)",
"to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest = self.path(name) with open(dest, \"wb\") as f:",
"os import path, getcwd, makedirs, listdir, remove from typing import * import pickle",
"= method makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys()) def __getitem__(self, key: str) ->",
"extract(self, force=False) -> NoReturn: \"\"\" Extract all game archives to Game Cache Folder,",
"-*- coding: utf-8 -*- from os import path, getcwd, makedirs, listdir, remove from",
"= path.join(self.root(), key) with open(file, \"wb\") as f: pickle.dump(value, f) def __delitem__(self, key:",
"return listdir(self.root()) def values(self) -> Iterable[np.ndarray]: for key in self.keys(): yield self[key] def",
"from Internet. :param force: whether forces to download archive if it has already",
"from typing import * import pickle from abc import ABCMeta, abstractmethod from sgfmill.sgf",
"makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str: return cache_dir def get_archive_dir() -> str: return",
"with `.game.pkl` and be start with it's size of the board. \"\"\" pass",
"\"wb\") as f: pickle.dump(value, f) def __delitem__(self, key: str): file = path.join(self.root(), key)",
"return NotImplemented def root(self): return path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]: return listdir(self.root())",
"def __setitem__(self, key: str, value: Tuple[np.ndarray, ...]): file = path.join(self.root(), key) with open(file,",
"= None) -> NoReturn: global cache_dir, archive_folder, game_folder, array_folder if directory is None:",
"remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self,",
"sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi, setup_stones)",
"name)) def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest = self.path(name) with open(dest, \"wb\")",
"# -*- coding: utf-8 -*- from os import path, getcwd, makedirs, listdir, remove",
"Folder, every single file should end with `.game.pkl` and be start with it's",
"= path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder =",
"= \"none\" @classmethod def archive_map(cls): _dict = {_cls.name: _cls for _cls in cls.__subclasses__()}",
"all game archives to :param force: whether forces to download archive if it",
"cache_dir = path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder",
"19): return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest =",
"self[key] def items(self) -> Iterable[Tuple[str, GameData]]: for key in self.keys(): yield key, self[key]",
"List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game):",
"str): file = path.join(self.root(), key) remove(file) def __contains__(self, key: str): return path.exists(path.join(self.root(), key))",
"\"rb\") as f: return pickle.load(f) @staticmethod def pickle_exists(name: str, size: Union[int, str] =",
"= self.path(name) with open(dest, \"wb\") as f: pickle.dump(self, f) def root(self): return path.join(get_game_dir(),",
"list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones",
"name: str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size), name))",
"other.size return NotImplemented def root(self): return path.join(get_game_dir(), str(self.size)) def keys(self) -> List[str]: return",
"import pickle from abc import ABCMeta, abstractmethod from sgfmill.sgf import Sgf_game import numpy",
"import * __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"]",
"\"../..\", \".data\") cache_dir = default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\")",
"makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir() -> str: return cache_dir def",
"default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir = default_cache_dir archive_folder = path.join(cache_dir, \".kgs\") game_folder",
"cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve all",
"is None: directory = default_cache_dir cache_dir = path.join(getcwd(), directory) archive_folder = path.join(cache_dir, \".kgs\")",
"path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def archive_map(cls): _dict = {_cls.name:",
"= sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]), (node.get_move() for",
"cls.__subclasses__()} for v in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self, force=False) ->",
"Internet. :param force: whether forces to download archive if it has already existed",
"def from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda",
"return game_folder def get_array_dir() -> str: return array_folder class GameData(NamedTuple): size: int winner:",
"class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def archive_map(cls): _dict = {_cls.name: _cls for",
"path(self, name: str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def",
"name: str): return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def archive_map(cls):",
"__setitem__(self, name: str, data: GameData): data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name))",
"key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, np.ndarray]]: for key in",
"file = path.join(self.root(), key) with open(file, \"rb\") as f: return pickle.load(f) def __setitem__(self,",
"Cache Folder, every single file should end with `.game.pkl` and be start with",
"self.size = size self.method = method makedirs(self.root(), exist_ok=True) def __len__(self): return len(self.keys()) def",
"...]): file = path.join(self.root(), key) with open(file, \"wb\") as f: pickle.dump(value, f) def",
"return path.join(get_array_dir(), str(self.size), self.method) def keys(self) -> List[str]: return listdir(self.root()) def values(self) ->",
"def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest = self.path(name) with open(dest, \"wb\") as",
"yield key, self[key] class ArrayDatabase: def __init__(self, method: str, size=19): self.size = size",
"f) def __delitem__(self, key: str): file = path.join(self.root(), key) remove(file) def __contains__(self, key:",
"self.path(name) with open(dest, \"wb\") as f: pickle.dump(self, f) def root(self): return path.join(get_game_dir(), str(self.size))",
"whether forces to download archive if it has already existed \"\"\" pass def",
"Sgf_game): size = sgf_game.get_size() winner = GoPlayer.to_player(sgf_game.get_winner()) sequence = list(map(lambda move: (GoPlayer.to_player(move[0]), move[1]),",
"__len__(self): return len(self.keys()) def __getitem__(self, name: str) -> GameData: return GameData.from_pickle(name, self.size) def",
"\".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True)",
"archives available from Internet. :param force: whether forces to download archive if it",
"available from Internet. :param force: whether forces to download archive if it has",
"key in self.keys(): yield self[key] def items(self) -> Iterable[Tuple[str, GameData]]: for key in",
"yield self[key] def items(self) -> Iterable[Tuple[str, GameData]]: for key in self.keys(): yield key,",
"-> Iterable[Tuple[str, GameData]]: for key in self.keys(): yield key, self[key] class ArrayDatabase: def",
"move[1]), (node.get_move() for node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones() return",
"\"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\", \"GameDatabase\", \"ArrayDatabase\"] default_cache_dir = path.join(path.dirname(path.realpath(__file__)), \"../..\", \".data\") cache_dir",
"str): return path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(), str(self.size), self.method) def keys(self) ->",
"other): if isinstance(other, GameDatabase): return self.size == other.size return NotImplemented def root(self): return",
"game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(),",
"setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game): size = sgf_game.get_size() winner",
"return pickle.load(f) @staticmethod def pickle_exists(name: str, size: Union[int, str] = 19): return path.exists(path.join(get_game_dir(),",
"key: str) -> Tuple[np.ndarray, ...]: file = path.join(self.root(), key) with open(file, \"rb\") as",
"def items(self) -> Iterable[Tuple[str, GameData]]: for key in self.keys(): yield key, self[key] class",
"Tuple[np.ndarray, ...]: file = path.join(self.root(), key) with open(file, \"rb\") as f: return pickle.load(f)",
"str] = 19): return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True)",
"be start with it's size of the board. \"\"\" pass @abstractmethod def unpack(self,",
"download archive if it has already existed \"\"\" pass @abstractmethod def extract(self, force=False)",
"def __getitem__(self, key: str) -> Tuple[np.ndarray, ...]: file = path.join(self.root(), key) with open(file,",
"= path.join(cache_dir, \".array\") makedirs(get_cache_dir(), exist_ok=True) makedirs(get_archive_dir(), exist_ok=True) makedirs(get_game_dir(), exist_ok=True) makedirs(get_array_dir(), exist_ok=True) def get_cache_dir()",
"listdir, remove from typing import * import pickle from abc import ABCMeta, abstractmethod",
"sgfmill.sgf import Sgf_game import numpy as np from .go_types import * __all__ =",
"return path.join(self.root(), name) class GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def archive_map(cls): _dict =",
"archive if it has already existed \"\"\" pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force)",
"cls(size, winner, sequence, komi, setup_stones) @staticmethod def from_pickle(name: str, size: Union[int, str] =",
"f) def root(self): return path.join(get_game_dir(), str(self.size)) def path(self, name: str): return path.join(self.root(), name)",
"= path.join(cache_dir, \".kgs\") game_folder = path.join(cache_dir, \".game\") array_folder = path.join(cache_dir, \".array\") def set_cache_dir(directory:",
"\"\"\" pass def download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self, size=19):",
"str): makedirs(self.root(), exist_ok=True) dest = self.path(name) with open(dest, \"wb\") as f: pickle.dump(self, f)",
"path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name: str): makedirs(self.root(), exist_ok=True) dest = self.path(name) with",
"size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]]",
"\".array\") def set_cache_dir(directory: Optional[str] = None) -> NoReturn: global cache_dir, archive_folder, game_folder, array_folder",
"GameArchive(metaclass=ABCMeta): name = \"none\" @classmethod def archive_map(cls): _dict = {_cls.name: _cls for _cls",
"GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls,",
"\"\"\" Unpack all game archives to :param force: whether forces to download archive",
"root(self): return path.join(get_array_dir(), str(self.size), self.method) def keys(self) -> List[str]: return listdir(self.root()) def values(self)",
"Game Cache Folder, every single file should end with `.game.pkl` and be start",
"-> NoReturn: \"\"\" Unpack all game archives to :param force: whether forces to",
"has already existed \"\"\" pass @abstractmethod def extract(self, force=False) -> NoReturn: \"\"\" Extract",
"\"\"\" pass @abstractmethod def unpack(self, force=False) -> NoReturn: \"\"\" Unpack all game archives",
"len(self.keys()) def __getitem__(self, name: str) -> GameData: return GameData.from_pickle(name, self.size) def __setitem__(self, name:",
"import numpy as np from .go_types import * __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\",",
"Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod def from_sgf(cls, sgf_game: Sgf_game): size",
"name)) def __contains__(self, name: str): return path.exists(path.join(get_game_dir(), str(self.size), name)) def __eq__(self, other): if",
"return path.exists(path.join(self.root(), key)) def root(self): return path.join(get_array_dir(), str(self.size), self.method) def keys(self) -> List[str]:",
"download archive if it has already existed \"\"\" pass def download(self, force=False): self.retrieve(force=force)",
"size: Union[int, str] = 19): return path.exists(path.join(get_game_dir(), str(size), name)) def to_pickle(self, name: str):",
"archives to :param force: whether forces to download archive if it has already",
"get_archive_dir() -> str: return archive_folder def get_game_dir() -> str: return game_folder def get_array_dir()",
"def __eq__(self, other): if isinstance(other, GameDatabase): return self.size == other.size return NotImplemented def",
"import path, getcwd, makedirs, listdir, remove from typing import * import pickle from",
"None) -> NoReturn: global cache_dir, archive_folder, game_folder, array_folder if directory is None: directory",
"= sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi, setup_stones) @staticmethod def from_pickle(name: str, size:",
"_dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve all archives",
"download(self, force=False): self.retrieve(force=force) self.unpack(force=force) self.extract(force=force) class GameDatabase: def __init__(self, size=19): self.size = size",
"data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name: str): return",
"archive_folder, game_folder, array_folder if directory is None: directory = default_cache_dir cache_dir = path.join(getcwd(),",
"int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones: Tuple[Optional[Set[GoPoint]], Optional[Set[GoPoint]], Optional[Set[GoPoint]]] @classmethod",
"isinstance(other, GameDatabase): return self.size == other.size return NotImplemented def root(self): return path.join(get_game_dir(), str(self.size))",
"from .go_types import * __all__ = [\"set_cache_dir\", \"get_cache_dir\", \"get_game_dir\", \"get_archive_dir\", \"get_array_dir\", \"GameData\", \"GameArchive\",",
"in cls.__subclasses__(): _dict.update(v.archive_map()) return _dict @abstractmethod def retrieve(self, force=False) -> NoReturn: \"\"\" Retrieve",
"setup_stones = sgf_game.get_root().get_setup_stones() return cls(size, winner, sequence, komi, setup_stones) @staticmethod def from_pickle(name: str,",
"path, getcwd, makedirs, listdir, remove from typing import * import pickle from abc",
"array_folder class GameData(NamedTuple): size: int winner: GoPlayer sequence: List[Tuple[Optional[GoPlayer], Optional[GoPoint]]] komi: float setup_stones:",
"(GoPlayer.to_player(move[0]), move[1]), (node.get_move() for node in sgf_game.get_main_sequence()))) komi = sgf_game.get_komi() setup_stones = sgf_game.get_root().get_setup_stones()",
"path.join(self.root(), key) with open(file, \"wb\") as f: pickle.dump(value, f) def __delitem__(self, key: str):",
"name: str, data: GameData): data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name)) def",
"GameDatabase): return self.size == other.size return NotImplemented def root(self): return path.join(get_game_dir(), str(self.size)) def",
"GameData): data.to_pickle(name) def __delitem__(self, name: str): remove(path.join(get_game_dir(), str(self.size), name)) def __contains__(self, name: str):",
"f: pickle.dump(value, f) def __delitem__(self, key: str): file = path.join(self.root(), key) remove(file) def"
"{ngram: [next_words], ... } \"\"\" def file_words(file_pointer): \"\"\"Generator for words in a file\"\"\"",
"choose from while len(holy_words) <= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:])",
"raise Heresy(\"Not one word of God shall be changed!\") unholy_num |= unholy_bit <<",
"= \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress = compress self.line_width = line_width",
"= 0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode",
"for bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make sure",
"annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase, width=self.line_width)) for idx,",
"0 for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt",
"Remove line annotations try: holy_words = ' '.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split()",
"= unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start with a capitalized tuple",
"'.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase, width=self.line_width))",
"for bit_num in range(8): # Extract bit from byte byte, bit = byte",
"\"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress = compress self.line_width = line_width self.tuple_length",
"textwrap class Heresy(Exception): \"\"\"You have defiled the word of God!\"\"\" pass def bits(byte_string):",
"def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress = compress self.line_width = line_width self.tuple_length =",
"unholy unicode into Holy text\"\"\" if not unholy_bytes: raise Heresy(\"Thou shalt not be",
"if len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word] return",
"speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select from even indices if",
"hallelujah and amen if split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip(): raise",
"random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx",
"1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join(",
"holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not modify",
"+ annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a list of holy words",
"you imitate the word of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for",
"words in a file\"\"\" for line in file_pointer: for word in line.split(): yield",
"def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in byte_str) if __name__ == '__main__': god",
"... } \"\"\" def file_words(file_pointer): \"\"\"Generator for words in a file\"\"\" for line",
"holy_words = ' '.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How dare",
"idx, holy_phrase in enumerate(holy_sentences) ] ) return self.hallelujah + annotated_speech_of_god + self.amen def",
"[ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase, width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences)",
"self.god_grams[holy_tuple] # Make sure that we have some words to choose from while",
"word_list = word_list[1:] + [word] return {key: tuple(val) for key, val in ngrams.items()}",
"annotations try: holy_words = ' '.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split() except: raise",
"the word of God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <= 1:",
"unholy bytes or unholy unicode into Holy text\"\"\" if not unholy_bytes: raise Heresy(\"Thou",
"] ) return self.hallelujah + annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a",
"tuple_length) self.capital_tuples = [key for key, value in self.god_grams.items() if key[0][0].isupper()] def praise(self,",
"raise Heresy(\"Your praise is insufficient!\") # Remove hallelujah and amen try: holy_annotated_sentences =",
"= \"Hello world!\" print(\"I praise unto God: %s\\n\\n\" % hello_world) holy_hello_world = god.praise(hello_world)",
"reveal(self, annotated_speech): \"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check for",
"God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in byte_str) if __name__",
"tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the word of God!\") unholy_bytes = b'' unholy_num",
"we have some words to choose from while len(holy_words) <= 1: chosen_word =",
"for byte in byte_str) if __name__ == '__main__': god = GodZip(compress=False) hello_world =",
"os import random import textwrap class Heresy(Exception): \"\"\"You have defiled the word of",
"= tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select from even indices if bit ==",
"if split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise is",
"[next_words], ... } \"\"\" def file_words(file_pointer): \"\"\"Generator for words in a file\"\"\" for",
"holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select from even indices",
"indices if bit == 0, odd if bit == 1 chosen_word = random.choice(holy_words[bit::2])",
"to choose from while len(holy_words) <= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple =",
"== '__main__': god = GodZip(compress=False) hello_world = \"Hello world!\" print(\"I praise unto God:",
"# Check for hallelujah and amen if split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1]",
"try: unholy_bit = holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not one word of God",
"data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for",
"of holy words into unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You",
"to read from. :param tuple_length: The length of the ngram keys :return: Dict",
"byte >> 1, byte % 2 yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a",
"Heresy(\"The word of God will not be silenced!\") # Remove line annotations try:",
"0: unholy_bytes += bytes([unholy_num]) unholy_num = 0 bit_counter = 0 if self.compress: unholy_bytes",
"= GodZip(compress=False) hello_world = \"Hello world!\" print(\"I praise unto God: %s\\n\\n\" % hello_world)",
"Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes)",
":return: Dict of the form {ngram: [next_words], ... } \"\"\" def file_words(file_pointer): \"\"\"Generator",
"+ 1) + holy_phrase, width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences) ] ) return",
"tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key",
"amen if split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise",
"in line.split(): yield word ngrams = defaultdict(lambda: set()) with open(filename, 'r') as fp:",
"return self.hallelujah + annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a list of",
"speech_of_god = list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple]",
"some words to choose from while len(holy_words) <= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word)",
"'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for key, value in",
"% 2 yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with ngrams as",
"bit_num in range(8): # Extract bit from byte byte, bit = byte >>",
"unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start with a capitalized tuple speech_of_god",
"if len(holy_ngram_list) <= 1: continue try: unholy_bit = holy_ngram_list.index(holy_word) % 2 except: raise",
"GodZip(object): \"\"\"Turn unholy bits into holy words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\" amen",
"' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase,",
"= line_width self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path,",
"the Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes =",
"Filename to read from. :param tuple_length: The length of the ngram keys :return:",
"unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start with a capitalized",
"= tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples =",
"self.line_width = line_width self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams =",
"\"Hello world!\" print(\"I praise unto God: %s\\n\\n\" % hello_world) holy_hello_world = god.praise(hello_world) print(holy_hello_world)",
"sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How dare you imitate the word of God!\")",
"except: raise Heresy(\"Thou shalt not modify the word of God!\") holy_tuple = tuple(holy_tuple[1:]",
"tuple_length=3, line_width=70, compress=True): self.compress = compress self.line_width = line_width self.tuple_length = tuple_length data_path",
"= compress self.line_width = line_width self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt')",
"bit == 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god",
"line_width self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length)",
"filename: Filename to read from. :param tuple_length: The length of the ngram keys",
"\\ or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise is insufficient!\") # Remove hallelujah",
"for words in a file\"\"\" for line in file_pointer: for word in line.split():",
"or unholy unicode into Holy text\"\"\" if not unholy_bytes: raise Heresy(\"Thou shalt not",
"import textwrap class Heresy(Exception): \"\"\"You have defiled the word of God!\"\"\" pass def",
"bytes): unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start with a",
"holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <= 1: continue try: unholy_bit =",
"generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with ngrams as key following words as value",
"word ngrams = defaultdict(lambda: set()) with open(filename, 'r') as fp: word_list = []",
"bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make sure that",
"a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:])",
"bytes([unholy_num]) unholy_num = 0 bit_counter = 0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return",
"line in file_pointer: for word in line.split(): yield word ngrams = defaultdict(lambda: set())",
"for idx, holy_phrase in enumerate(holy_sentences) ] ) return self.hallelujah + annotated_speech_of_god + self.amen",
"tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <= 1: continue try: unholy_bit = holy_ngram_list.index(holy_word) %",
"word of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in byte_str)",
"a byte stream\"\"\" for byte in byte_string: for bit_num in range(8): # Extract",
"ngrams as key following words as value :param filename: Filename to read from.",
"god = GodZip(compress=False) hello_world = \"Hello world!\" print(\"I praise unto God: %s\\n\\n\" %",
"not unholy_bytes: raise Heresy(\"Thou shalt not be silent in the face of the",
"shalt not modify the word of God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if",
"word_list[1:] + [word] return {key: tuple(val) for key, val in ngrams.items()} class GodZip(object):",
"= 0 bit_counter = 0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def",
"self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start with a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples))",
"line_width=70, compress=True): self.compress = compress self.line_width = line_width self.tuple_length = tuple_length data_path =",
"holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make sure that we have some",
"__name__ == '__main__': god = GodZip(compress=False) hello_world = \"Hello world!\" print(\"I praise unto",
"holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not modify the word of God!\")",
"'__main__': god = GodZip(compress=False) hello_world = \"Hello world!\" print(\"I praise unto God: %s\\n\\n\"",
"= 0 bit_counter = 0 for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple]",
"unholy_bit = holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not one word of God shall",
"\".format(idx + 1) + holy_phrase, width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences) ] )",
"self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy bytes or unholy unicode into",
"') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase, width=self.line_width)) for",
"# Extract bit from byte byte, bit = byte >> 1, byte %",
"holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not one word of God shall be changed!\")",
"unholy bits into holy words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\"",
"unicode into Holy text\"\"\" if not unholy_bytes: raise Heresy(\"Thou shalt not be silent",
"ngram keys :return: Dict of the form {ngram: [next_words], ... } \"\"\" def",
"gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech =",
"chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select from",
"set()) with open(filename, 'r') as fp: word_list = [] for word in file_words(fp):",
"def praise(self, unholy_bytes): \"\"\"Encode unholy bytes or unholy unicode into Holy text\"\"\" if",
"if __name__ == '__main__': god = GodZip(compress=False) hello_world = \"Hello world!\" print(\"I praise",
"= ' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) +",
"the face of the Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if",
"bit_counter = 0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech):",
"have some words to choose from while len(holy_words) <= 1: chosen_word = holy_words[0]",
"into unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the word",
"Heresy(Exception): \"\"\"You have defiled the word of God!\"\"\" pass def bits(byte_string): \"\"\"Generates a",
"bit_counter bit_counter += 1 if bit_counter % 8 == 0: unholy_bytes += bytes([unholy_num])",
"world!\" print(\"I praise unto God: %s\\n\\n\" % hello_world) holy_hello_world = god.praise(hello_world) print(holy_hello_world) assert(hello_world",
"tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word] return {key: tuple(val) for",
"word of God!\") unholy_bytes = b'' unholy_num = 0 bit_counter = 0 for",
"= list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] #",
"in byte_string: for bit_num in range(8): # Extract bit from byte byte, bit",
"= [] for word in file_words(fp): if len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word)",
"Heresy(\"Thou shalt not be silent in the face of the Lord!\") if not",
"self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not modify the word of God!\") holy_tuple =",
"tuple_length: The length of the ngram keys :return: Dict of the form {ngram:",
"random import textwrap class Heresy(Exception): \"\"\"You have defiled the word of God!\"\"\" pass",
"byte, bit = byte >> 1, byte % 2 yield bit def generate_ngram_dict(filename,",
"in self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy bytes or unholy unicode",
"holy words into unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock",
"= tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the word of God!\") unholy_bytes = b''",
"holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not modify the word",
"in a file\"\"\" for line in file_pointer: for word in line.split(): yield word",
"try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The word of God will not be",
"holy_annotated_sentences]).split() except: raise Heresy(\"How dare you imitate the word of God!\") return self.reveal_from_words(holy_words)",
"holy_words = self.god_grams[holy_tuple] # Select from even indices if bit == 0, odd",
"if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy speech",
"# Make sure that we have some words to choose from while len(holy_words)",
"+= bytes([unholy_num]) unholy_num = 0 bit_counter = 0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes)",
"Make sure that we have some words to choose from while len(holy_words) <=",
"word in line.split(): yield word ngrams = defaultdict(lambda: set()) with open(filename, 'r') as",
"+ [word] return {key: tuple(val) for key, val in ngrams.items()} class GodZip(object): \"\"\"Turn",
"key, value in self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy bytes or",
"fp: word_list = [] for word in file_words(fp): if len(word_list) < tuple_length: word_list.append(word)",
"words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70,",
"except: raise Heresy(\"How dare you imitate the word of God!\") return self.reveal_from_words(holy_words) def",
"\"\"\"Turn unholy bits into holy words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\" amen =",
"of God will not be silenced!\") # Remove line annotations try: holy_words =",
"value :param filename: Filename to read from. :param tuple_length: The length of the",
"holy_phrase in enumerate(holy_sentences) ] ) return self.hallelujah + annotated_speech_of_god + self.amen def reveal_from_words(self,",
"shall be changed!\") unholy_num |= unholy_bit << bit_counter bit_counter += 1 if bit_counter",
"0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy",
"= ' '.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How dare you",
"word_list = [] for word in file_words(fp): if len(word_list) < tuple_length: word_list.append(word) continue",
"in the face of the Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode()",
"= gzip.compress(unholy_bytes) # Start with a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit",
"= tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make sure that we have some words",
"= self.god_grams[holy_tuple] # Make sure that we have some words to choose from",
"split_annotated_speech = annotated_speech.split('\\n\\n') # Check for hallelujah and amen if split_annotated_speech[0] != self.hallelujah.strip()",
"1: continue try: unholy_bit = holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not one word",
"byte % 2 yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with ngrams",
"The length of the ngram keys :return: Dict of the form {ngram: [next_words],",
"% 8 == 0: unholy_bytes += bytes([unholy_num]) unholy_num = 0 bit_counter = 0",
"of bits from a byte stream\"\"\" for byte in byte_string: for bit_num in",
"as key following words as value :param filename: Filename to read from. :param",
"file_words(file_pointer): \"\"\"Generator for words in a file\"\"\" for line in file_pointer: for word",
"of God!\") unholy_bytes = b'' unholy_num = 0 bit_counter = 0 for holy_word",
"defaultdict import gzip import os import random import textwrap class Heresy(Exception): \"\"\"You have",
"text\"\"\" if not unholy_bytes: raise Heresy(\"Thou shalt not be silent in the face",
"0 bit_counter = 0 for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except:",
"dare you imitate the word of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte)",
"following words as value :param filename: Filename to read from. :param tuple_length: The",
"Heresy(\"You mock the word of God!\") unholy_bytes = b'' unholy_num = 0 bit_counter",
"\"\"\"Generate a dict with ngrams as key following words as value :param filename:",
"the word of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in",
"length of the ngram keys :return: Dict of the form {ngram: [next_words], ...",
"= b'' unholy_num = 0 bit_counter = 0 for holy_word in holy_words[self.tuple_length:]: try:",
"= defaultdict(lambda: set()) with open(filename, 'r') as fp: word_list = [] for word",
"for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not",
"Heresy(\"Thou shalt not modify the word of God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,))",
"unholy_bytes): \"\"\"Encode unholy bytes or unholy unicode into Holy text\"\"\" if not unholy_bytes:",
"value in self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy bytes or unholy",
"God will not be silenced!\") # Remove line annotations try: holy_words = '",
"in range(8): # Extract bit from byte byte, bit = byte >> 1,",
"\"\"\" def file_words(file_pointer): \"\"\"Generator for words in a file\"\"\" for line in file_pointer:",
"silenced!\") # Remove line annotations try: holy_words = ' '.join([sentence.split('] ')[1] for sentence",
"Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress = compress self.line_width",
"bits(byte_string): \"\"\"Generates a sequence of bits from a byte stream\"\"\" for byte in",
"\"\"\"Encode unholy bytes or unholy unicode into Holy text\"\"\" if not unholy_bytes: raise",
"def file_words(file_pointer): \"\"\"Generator for words in a file\"\"\" for line in file_pointer: for",
"sure that we have some words to choose from while len(holy_words) <= 1:",
"God shall be changed!\") unholy_num |= unholy_bit << bit_counter bit_counter += 1 if",
":param filename: Filename to read from. :param tuple_length: The length of the ngram",
"try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the word of God!\") unholy_bytes",
"= split_annotated_speech[1:-1] except: raise Heresy(\"The word of God will not be silenced!\") #",
"mock the word of God!\") unholy_bytes = b'' unholy_num = 0 bit_counter =",
"word of God will not be silenced!\") # Remove line annotations try: holy_words",
"unholy_bytes = gzip.compress(unholy_bytes) # Start with a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for",
"tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make sure that we have some words to",
"byte_string: for bit_num in range(8): # Extract bit from byte byte, bit =",
"in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not modify the",
"in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make sure that we",
"defaultdict(lambda: set()) with open(filename, 'r') as fp: word_list = [] for word in",
"self.hallelujah + annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a list of holy",
"GodZip(compress=False) hello_world = \"Hello world!\" print(\"I praise unto God: %s\\n\\n\" % hello_world) holy_hello_world",
"= 0 for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou",
"hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in byte_str) if __name__ == '__main__': god =",
"form {ngram: [next_words], ... } \"\"\" def file_words(file_pointer): \"\"\"Generator for words in a",
"= random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}]",
"byte stream\"\"\" for byte in byte_string: for bit_num in range(8): # Extract bit",
"[key for key, value in self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy",
"imitate the word of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte",
"that we have some words to choose from while len(holy_words) <= 1: chosen_word",
"holy speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check for hallelujah and amen",
"raise Heresy(\"You mock the word of God!\") unholy_bytes = b'' unholy_num = 0",
"a dict with ngrams as key following words as value :param filename: Filename",
"< tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word] return {key: tuple(val)",
"changed!\") unholy_num |= unholy_bit << bit_counter bit_counter += 1 if bit_counter % 8",
"or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise is insufficient!\") # Remove hallelujah and",
"a sequence of bits from a byte stream\"\"\" for byte in byte_string: for",
"raise Heresy(\"How dare you imitate the word of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str):",
"speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx +",
"bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check for hallelujah and amen if split_annotated_speech[0] !=",
"ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word] return {key: tuple(val) for key, val in",
"Select from even indices if bit == 0, odd if bit == 1",
"of the Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes",
"return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n')",
"of the ngram keys :return: Dict of the form {ngram: [next_words], ... }",
"speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check for hallelujah and amen if",
"self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in byte_str) if __name__ == '__main__':",
"= \"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress",
"try: holy_words = ' '.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How",
"holy_words): \"\"\"Decode a list of holy words into unholy bytes.\"\"\" try: holy_tuple =",
"'\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase, width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences) ]",
"annotated_speech): \"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check for hallelujah",
"tuple(val) for key, val in ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits into holy",
"annotated_speech.split('\\n\\n') # Check for hallelujah and amen if split_annotated_speech[0] != self.hallelujah.strip() \\ or",
"word of God!\"\"\" pass def bits(byte_string): \"\"\"Generates a sequence of bits from a",
"+= 1 if bit_counter % 8 == 0: unholy_bytes += bytes([unholy_num]) unholy_num =",
"!= self.amen.strip(): raise Heresy(\"Your praise is insufficient!\") # Remove hallelujah and amen try:",
"from collections import defaultdict import gzip import os import random import textwrap class",
"self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for key, value in self.god_grams.items() if",
"capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words",
"split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise is insufficient!\")",
"split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise is insufficient!\") # Remove hallelujah and amen",
"= holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select from even",
"self.compress = compress self.line_width = line_width self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data',",
"bytes or unholy unicode into Holy text\"\"\" if not unholy_bytes: raise Heresy(\"Thou shalt",
"odd if bit == 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('.",
"with a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple =",
"Check for hallelujah and amen if split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1] !=",
"self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples",
"# Remove line annotations try: holy_words = ' '.join([sentence.split('] ')[1] for sentence in",
"God!\"\"\" pass def bits(byte_string): \"\"\"Generates a sequence of bits from a byte stream\"\"\"",
"as value :param filename: Filename to read from. :param tuple_length: The length of",
"byte_str) if __name__ == '__main__': god = GodZip(compress=False) hello_world = \"Hello world!\" print(\"I",
"for line in file_pointer: for word in line.split(): yield word ngrams = defaultdict(lambda:",
"of God!\"\"\" pass def bits(byte_string): \"\"\"Generates a sequence of bits from a byte",
"be silent in the face of the Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes",
"with open(filename, 'r') as fp: word_list = [] for word in file_words(fp): if",
"word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word] return {key: tuple(val) for key,",
"while len(holy_words) <= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words =",
"return ':'.join('{:02x}'.format(byte) for byte in byte_str) if __name__ == '__main__': god = GodZip(compress=False)",
"for key, value in self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy bytes",
"words into unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the",
"return {key: tuple(val) for key, val in ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits",
"ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits into holy words!\"\"\" hallelujah = \"Sayeth the",
"yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with ngrams as key following",
"bit_counter % 8 == 0: unholy_bytes += bytes([unholy_num]) unholy_num = 0 bit_counter =",
"in enumerate(holy_sentences) ] ) return self.hallelujah + annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words):",
"not be silenced!\") # Remove line annotations try: holy_words = ' '.join([sentence.split('] ')[1]",
"class Heresy(Exception): \"\"\"You have defiled the word of God!\"\"\" pass def bits(byte_string): \"\"\"Generates",
"holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the word of God!\") unholy_bytes =",
"bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make sure that we have",
"for word in line.split(): yield word ngrams = defaultdict(lambda: set()) with open(filename, 'r')",
"raise Heresy(\"Thou shalt not modify the word of God!\") holy_tuple = tuple(holy_tuple[1:] +",
"in file_words(fp): if len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] +",
"if self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start with a capitalized tuple speech_of_god =",
"} \"\"\" def file_words(file_pointer): \"\"\"Generator for words in a file\"\"\" for line in",
"words as value :param filename: Filename to read from. :param tuple_length: The length",
"holy_phrase, width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences) ] ) return self.hallelujah + annotated_speech_of_god",
"bit_counter += 1 if bit_counter % 8 == 0: unholy_bytes += bytes([unholy_num]) unholy_num",
"def reveal_from_words(self, holy_words): \"\"\"Decode a list of holy words into unholy bytes.\"\"\" try:",
"not be silent in the face of the Lord!\") if not isinstance(unholy_bytes, bytes):",
"if bit == 0, odd if bit == 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word)",
"key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy bytes or unholy unicode into Holy text\"\"\"",
"unholy_num |= unholy_bit << bit_counter bit_counter += 1 if bit_counter % 8 ==",
"== 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god =",
"God!\") unholy_bytes = b'' unholy_num = 0 bit_counter = 0 for holy_word in",
"'r') as fp: word_list = [] for word in file_words(fp): if len(word_list) <",
"key, val in ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits into holy words!\"\"\" hallelujah",
"unholy_bytes += bytes([unholy_num]) unholy_num = 0 bit_counter = 0 if self.compress: unholy_bytes =",
"generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for key, value in self.god_grams.items() if key[0][0].isupper()] def",
"+ holy_phrase, width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences) ] ) return self.hallelujah +",
"+ (holy_word,)) if len(holy_ngram_list) <= 1: continue try: unholy_bit = holy_ngram_list.index(holy_word) % 2",
"bit == 0, odd if bit == 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences",
"try: holy_ngram_list = self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not modify the word of",
">> 1, byte % 2 yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict",
"import random import textwrap class Heresy(Exception): \"\"\"You have defiled the word of God!\"\"\"",
"file\"\"\" for line in file_pointer: for word in line.split(): yield word ngrams =",
"for byte in byte_string: for bit_num in range(8): # Extract bit from byte",
"holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The word of God will not be silenced!\")",
"for hallelujah and amen if split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip():",
"1 if bit_counter % 8 == 0: unholy_bytes += bytes([unholy_num]) unholy_num = 0",
"self.god_grams[holy_tuple] # Select from even indices if bit == 0, odd if bit",
"unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the word of",
"isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start with",
"will not be silenced!\") # Remove line annotations try: holy_words = ' '.join([sentence.split(']",
"== 0: unholy_bytes += bytes([unholy_num]) unholy_num = 0 bit_counter = 0 if self.compress:",
"God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <= 1: continue try: unholy_bit",
"= word_list[1:] + [word] return {key: tuple(val) for key, val in ngrams.items()} class",
"import defaultdict import gzip import os import random import textwrap class Heresy(Exception): \"\"\"You",
"hello_world = \"Hello world!\" print(\"I praise unto God: %s\\n\\n\" % hello_world) holy_hello_world =",
"into Holy text\"\"\" if not unholy_bytes: raise Heresy(\"Thou shalt not be silent in",
"modify the word of God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <=",
"a list of holy words into unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except:",
"pass def bits(byte_string): \"\"\"Generates a sequence of bits from a byte stream\"\"\" for",
"if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode unholy bytes or unholy unicode into Holy",
"enumerate(holy_sentences) ] ) return self.hallelujah + annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words): \"\"\"Decode",
"face of the Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if self.compress:",
"praise unto God: %s\\n\\n\" % hello_world) holy_hello_world = god.praise(hello_world) print(holy_hello_world) assert(hello_world == god.reveal(holy_hello_world).decode())",
"byte in byte_string: for bit_num in range(8): # Extract bit from byte byte,",
"= generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for key, value in self.god_grams.items() if key[0][0].isupper()]",
"2 except: raise Heresy(\"Not one word of God shall be changed!\") unholy_num |=",
"line.split(): yield word ngrams = defaultdict(lambda: set()) with open(filename, 'r') as fp: word_list",
"if not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes) #",
"byte byte, bit = byte >> 1, byte % 2 yield bit def",
"bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with ngrams as key following words",
"self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise is insufficient!\") # Remove",
"is insufficient!\") # Remove hallelujah and amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise",
"holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select from even indices if bit",
"8 == 0: unholy_bytes += bytes([unholy_num]) unholy_num = 0 bit_counter = 0 if",
"+ self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a list of holy words into unholy",
"= tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <= 1: continue try: unholy_bit = holy_ngram_list.index(holy_word)",
"compress self.line_width = line_width self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams",
"read from. :param tuple_length: The length of the ngram keys :return: Dict of",
"= self.god_grams[holy_tuple] except: raise Heresy(\"Thou shalt not modify the word of God!\") holy_tuple",
"the form {ngram: [next_words], ... } \"\"\" def file_words(file_pointer): \"\"\"Generator for words in",
"into holy words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self,",
"\"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check for hallelujah and",
"bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise Heresy(\"You mock the word of God!\")",
":param tuple_length: The length of the ngram keys :return: Dict of the form",
"val in ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits into holy words!\"\"\" hallelujah =",
"annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a list of holy words into",
"have defiled the word of God!\"\"\" pass def bits(byte_string): \"\"\"Generates a sequence of",
"hallelujah = \"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True):",
"except: raise Heresy(\"You mock the word of God!\") unholy_bytes = b'' unholy_num =",
"'.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How dare you imitate the",
"__init__(self, tuple_length=3, line_width=70, compress=True): self.compress = compress self.line_width = line_width self.tuple_length = tuple_length",
"== 0, odd if bit == 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences =",
"split_annotated_speech[1:-1] except: raise Heresy(\"The word of God will not be silenced!\") # Remove",
"bit_counter = 0 for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list = self.god_grams[holy_tuple] except: raise",
"hallelujah and amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The word of God",
"of the form {ngram: [next_words], ... } \"\"\" def file_words(file_pointer): \"\"\"Generator for words",
"ngrams = defaultdict(lambda: set()) with open(filename, 'r') as fp: word_list = [] for",
"from byte byte, bit = byte >> 1, byte % 2 yield bit",
"bits from a byte stream\"\"\" for byte in byte_string: for bit_num in range(8):",
"file_pointer: for word in line.split(): yield word ngrams = defaultdict(lambda: set()) with open(filename,",
"= gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech",
"bits into holy words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def",
"of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in byte_str) if",
"len(holy_ngram_list) <= 1: continue try: unholy_bit = holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not",
"for word in file_words(fp): if len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list =",
"!= self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your praise is insufficient!\") #",
"if bit_counter % 8 == 0: unholy_bytes += bytes([unholy_num]) unholy_num = 0 bit_counter",
"praise is insufficient!\") # Remove hallelujah and amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except:",
"<= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] #",
"collections import defaultdict import gzip import os import random import textwrap class Heresy(Exception):",
"range(8): # Extract bit from byte byte, bit = byte >> 1, byte",
"yield word ngrams = defaultdict(lambda: set()) with open(filename, 'r') as fp: word_list =",
"in ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits into holy words!\"\"\" hallelujah = \"Sayeth",
") return self.hallelujah + annotated_speech_of_god + self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a list",
"not isinstance(unholy_bytes, bytes): unholy_bytes = unholy_bytes.encode() if self.compress: unholy_bytes = gzip.compress(unholy_bytes) # Start",
"class GodZip(object): \"\"\"Turn unholy bits into holy words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\"",
"be silenced!\") # Remove line annotations try: holy_words = ' '.join([sentence.split('] ')[1] for",
"insufficient!\") # Remove hallelujah and amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The",
"in holy_annotated_sentences]).split() except: raise Heresy(\"How dare you imitate the word of God!\") return",
"even indices if bit == 0, odd if bit == 1 chosen_word =",
"not modify the word of God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list)",
"one word of God shall be changed!\") unholy_num |= unholy_bit << bit_counter bit_counter",
"self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy speech into",
"|= unholy_bit << bit_counter bit_counter += 1 if bit_counter % 8 == 0:",
"key following words as value :param filename: Filename to read from. :param tuple_length:",
"Dict of the form {ngram: [next_words], ... } \"\"\" def file_words(file_pointer): \"\"\"Generator for",
"from even indices if bit == 0, odd if bit == 1 chosen_word",
"tuple_length): \"\"\"Generate a dict with ngrams as key following words as value :param",
"defiled the word of God!\"\"\" pass def bits(byte_string): \"\"\"Generates a sequence of bits",
"def bits(byte_string): \"\"\"Generates a sequence of bits from a byte stream\"\"\" for byte",
"chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [",
"1) + holy_phrase, width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences) ] ) return self.hallelujah",
"raise Heresy(\"The word of God will not be silenced!\") # Remove line annotations",
"shalt not be silent in the face of the Lord!\") if not isinstance(unholy_bytes,",
"self.capital_tuples = [key for key, value in self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes):",
"word of God shall be changed!\") unholy_num |= unholy_bit << bit_counter bit_counter +=",
"and amen if split_annotated_speech[0] != self.hallelujah.strip() \\ or split_annotated_speech[-1] != self.amen.strip(): raise Heresy(\"Your",
"praise(self, unholy_bytes): \"\"\"Encode unholy bytes or unholy unicode into Holy text\"\"\" if not",
"holy words!\"\"\" hallelujah = \"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3,",
"for key, val in ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits into holy words!\"\"\"",
"if not unholy_bytes: raise Heresy(\"Thou shalt not be silent in the face of",
"sequence of bits from a byte stream\"\"\" for byte in byte_string: for bit_num",
"words to choose from while len(holy_words) <= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple",
"gzip.compress(unholy_bytes) # Start with a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit in",
"the word of God!\"\"\" pass def bits(byte_string): \"\"\"Generates a sequence of bits from",
"and amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The word of God will",
"unholy_bytes: raise Heresy(\"Thou shalt not be silent in the face of the Lord!\")",
"'.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase, width=self.line_width)) for idx, holy_phrase in",
"for sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How dare you imitate the word of",
"{key: tuple(val) for key, val in ngrams.items()} class GodZip(object): \"\"\"Turn unholy bits into",
"with ngrams as key following words as value :param filename: Filename to read",
"self.amen def reveal_from_words(self, holy_words): \"\"\"Decode a list of holy words into unholy bytes.\"\"\"",
"Holy text\"\"\" if not unholy_bytes: raise Heresy(\"Thou shalt not be silent in the",
"tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words =",
"= os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for key,",
"import os import random import textwrap class Heresy(Exception): \"\"\"You have defiled the word",
"unholy_num = 0 bit_counter = 0 for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list =",
"gzip import os import random import textwrap class Heresy(Exception): \"\"\"You have defiled the",
"':'.join('{:02x}'.format(byte) for byte in byte_str) if __name__ == '__main__': god = GodZip(compress=False) hello_world",
"os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for key, value",
"= holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not one word of God shall be",
"' '.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How dare you imitate",
"\"\"\"You have defiled the word of God!\"\"\" pass def bits(byte_string): \"\"\"Generates a sequence",
"Start with a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple",
"<< bit_counter bit_counter += 1 if bit_counter % 8 == 0: unholy_bytes +=",
"import gzip import os import random import textwrap class Heresy(Exception): \"\"\"You have defiled",
"except: raise Heresy(\"Not one word of God shall be changed!\") unholy_num |= unholy_bit",
"holy_sentences = ' '.join(speech_of_god).split('. ') annotated_speech_of_god = '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1)",
"\"\"\"Decode a list of holy words into unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length])",
"# Remove hallelujah and amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The word",
"bit from byte byte, bit = byte >> 1, byte % 2 yield",
"keys :return: Dict of the form {ngram: [next_words], ... } \"\"\" def file_words(file_pointer):",
"return self.reveal_from_words(holy_words) def hex_expand(byte_str): return ':'.join('{:02x}'.format(byte) for byte in byte_str) if __name__ ==",
"(holy_word,)) if len(holy_ngram_list) <= 1: continue try: unholy_bit = holy_ngram_list.index(holy_word) % 2 except:",
"amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The word of God will not",
"continue try: unholy_bit = holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not one word of",
"\"\"\"Generates a sequence of bits from a byte stream\"\"\" for byte in byte_string:",
"bit = byte >> 1, byte % 2 yield bit def generate_ngram_dict(filename, tuple_length):",
"def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with ngrams as key following words as",
"word in file_words(fp): if len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:]",
"\"Sayeth the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress =",
"holy_words = self.god_grams[holy_tuple] # Make sure that we have some words to choose",
"from while len(holy_words) <= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words",
"<= 1: continue try: unholy_bit = holy_ngram_list.index(holy_word) % 2 except: raise Heresy(\"Not one",
"the ngram keys :return: Dict of the form {ngram: [next_words], ... } \"\"\"",
"list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes): holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Make",
"= self.god_grams[holy_tuple] # Select from even indices if bit == 0, odd if",
"in byte_str) if __name__ == '__main__': god = GodZip(compress=False) hello_world = \"Hello world!\"",
"\"\"\"Generator for words in a file\"\"\" for line in file_pointer: for word in",
"Heresy(\"Your praise is insufficient!\") # Remove hallelujah and amen try: holy_annotated_sentences = split_annotated_speech[1:-1]",
"width=self.line_width)) for idx, holy_phrase in enumerate(holy_sentences) ] ) return self.hallelujah + annotated_speech_of_god +",
"1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select",
"as fp: word_list = [] for word in file_words(fp): if len(word_list) < tuple_length:",
"'bible-kjv.raw.txt') self.god_grams = generate_ngram_dict(data_path, tuple_length) self.capital_tuples = [key for key, value in self.god_grams.items()",
"be changed!\") unholy_num |= unholy_bit << bit_counter bit_counter += 1 if bit_counter %",
"of God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <= 1: continue try:",
"0 bit_counter = 0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self,",
"2 yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with ngrams as key",
"the Lord:\\n\\n\" amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress = compress",
"Remove hallelujah and amen try: holy_annotated_sentences = split_annotated_speech[1:-1] except: raise Heresy(\"The word of",
"unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') #",
"stream\"\"\" for byte in byte_string: for bit_num in range(8): # Extract bit from",
"len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word] return {key:",
"[word] return {key: tuple(val) for key, val in ngrams.items()} class GodZip(object): \"\"\"Turn unholy",
"continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word] return {key: tuple(val) for key, val",
"from a byte stream\"\"\" for byte in byte_string: for bit_num in range(8): #",
"raise Heresy(\"Thou shalt not be silent in the face of the Lord!\") if",
"0, odd if bit == 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = '",
"# Select from even indices if bit == 0, odd if bit ==",
"def reveal(self, annotated_speech): \"\"\"Decode holy speech into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check",
"byte in byte_str) if __name__ == '__main__': god = GodZip(compress=False) hello_world = \"Hello",
"a file\"\"\" for line in file_pointer: for word in line.split(): yield word ngrams",
"except: raise Heresy(\"The word of God will not be silenced!\") # Remove line",
"compress=True): self.compress = compress self.line_width = line_width self.tuple_length = tuple_length data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),",
"unholy_bit << bit_counter bit_counter += 1 if bit_counter % 8 == 0: unholy_bytes",
"# Start with a capitalized tuple speech_of_god = list(random.choice(self.capital_tuples)) for bit in bits(unholy_bytes):",
"reveal_from_words(self, holy_words): \"\"\"Decode a list of holy words into unholy bytes.\"\"\" try: holy_tuple",
"print(\"I praise unto God: %s\\n\\n\" % hello_world) holy_hello_world = god.praise(hello_world) print(holy_hello_world) assert(hello_world ==",
"from. :param tuple_length: The length of the ngram keys :return: Dict of the",
"[] for word in file_words(fp): if len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list",
"b'' unholy_num = 0 bit_counter = 0 for holy_word in holy_words[self.tuple_length:]: try: holy_ngram_list",
"len(holy_words) <= 1: chosen_word = holy_words[0] speech_of_god.append(chosen_word) holy_tuple = tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple]",
"unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes def reveal(self, annotated_speech): \"\"\"Decode holy speech into bytes\"\"\"",
"if bit == 1 chosen_word = random.choice(holy_words[bit::2]) speech_of_god.append(chosen_word) holy_sentences = ' '.join(speech_of_god).split('. ')",
"Extract bit from byte byte, bit = byte >> 1, byte % 2",
"amen = \"\\n\\nAmen.\" def __init__(self, tuple_length=3, line_width=70, compress=True): self.compress = compress self.line_width =",
"silent in the face of the Lord!\") if not isinstance(unholy_bytes, bytes): unholy_bytes =",
"Heresy(\"How dare you imitate the word of God!\") return self.reveal_from_words(holy_words) def hex_expand(byte_str): return",
"% 2 except: raise Heresy(\"Not one word of God shall be changed!\") unholy_num",
"line annotations try: holy_words = ' '.join([sentence.split('] ')[1] for sentence in holy_annotated_sentences]).split() except:",
"= '.\\n\\n'.join( [ '\\n'.join(textwrap.wrap(\"[{}] \".format(idx + 1) + holy_phrase, width=self.line_width)) for idx, holy_phrase",
"into bytes\"\"\" split_annotated_speech = annotated_speech.split('\\n\\n') # Check for hallelujah and amen if split_annotated_speech[0]",
"= byte >> 1, byte % 2 yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate",
"unholy_num = 0 bit_counter = 0 if self.compress: unholy_bytes = gzip.decompress(unholy_bytes) return unholy_bytes",
"')[1] for sentence in holy_annotated_sentences]).split() except: raise Heresy(\"How dare you imitate the word",
"word of God!\") holy_tuple = tuple(holy_tuple[1:] + (holy_word,)) if len(holy_ngram_list) <= 1: continue",
"file_words(fp): if len(word_list) < tuple_length: word_list.append(word) continue ngrams[tuple(word_list)].add(word) word_list = word_list[1:] + [word]",
"self.amen.strip(): raise Heresy(\"Your praise is insufficient!\") # Remove hallelujah and amen try: holy_annotated_sentences",
"list of holy words into unholy bytes.\"\"\" try: holy_tuple = tuple(holy_words[:self.tuple_length]) except: raise",
"1, byte % 2 yield bit def generate_ngram_dict(filename, tuple_length): \"\"\"Generate a dict with",
"the word of God!\") unholy_bytes = b'' unholy_num = 0 bit_counter = 0",
"tuple(speech_of_god[-self.tuple_length:]) holy_words = self.god_grams[holy_tuple] # Select from even indices if bit == 0,",
"open(filename, 'r') as fp: word_list = [] for word in file_words(fp): if len(word_list)",
"dict with ngrams as key following words as value :param filename: Filename to",
"Heresy(\"Not one word of God shall be changed!\") unholy_num |= unholy_bit << bit_counter",
"in file_pointer: for word in line.split(): yield word ngrams = defaultdict(lambda: set()) with",
"of God shall be changed!\") unholy_num |= unholy_bit << bit_counter bit_counter += 1",
"unholy_bytes = b'' unholy_num = 0 bit_counter = 0 for holy_word in holy_words[self.tuple_length:]:",
"= [key for key, value in self.god_grams.items() if key[0][0].isupper()] def praise(self, unholy_bytes): \"\"\"Encode",
"= annotated_speech.split('\\n\\n') # Check for hallelujah and amen if split_annotated_speech[0] != self.hallelujah.strip() \\"
] |
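The row above holds overlapping n-grams of a single Python file: a steganography toy that hides arbitrary bytes inside Markov-chain "scripture" generated from bible-kjv.raw.txt. The core trick is parity-coded word choice: to emit a 0 bit the encoder picks an even-indexed successor of the current word tuple, for a 1 bit an odd-indexed one, and the decoder recovers each bit as index(word) % 2. A minimal runnable sketch of that mechanism, with a hypothetical one-entry successor table standing in for the file's KJV-derived god_grams:

import random

# Hypothetical toy table; the real script builds this from
# bible-kjv.raw.txt via generate_ngram_dict().
SUCCESSORS = {("In", "the"): ("beginning", "end", "morning", "evening")}

def bits(byte_string):
    """Yield each byte's bits, least significant first."""
    for byte in byte_string:
        for _ in range(8):
            byte, bit = byte >> 1, byte % 2
            yield bit

def encode_bit(key, bit):
    # Even-indexed successors carry a 0, odd-indexed ones a 1.
    return random.choice(SUCCESSORS[key][bit::2])

def decode_word(key, word):
    # The hidden bit is the parity of the chosen word's index.
    return SUCCESSORS[key].index(word) % 2

for bit in bits(b"\x2a"):
    word = encode_bit(("In", "the"), bit)
    assert decode_word(("In", "the"), word) == bit

The full script also slides the tuple window forward after every word and, when a successor list has only one entry, emits that word without consuming a bit, since a forced choice cannot carry information; the decoder skips such words the same way.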
[
"router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet,",
"router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet,",
"import * router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents')",
"router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests',",
"from .views import * router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents',",
"include from .views import * router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves')",
"YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests') urlpatterns = router.urls urlpatterns +=",
"django.conf.urls import url, include from .views import * router = DefaultRouter() router.register(r'users', UserViewSet)",
".views import * router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet,",
"DefaultRouter from django.conf.urls import url, include from .views import * router = DefaultRouter()",
"import url, include from .views import * router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves',",
"router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests') urlpatterns = router.urls urlpatterns",
"* router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube',",
"router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests') urlpatterns",
"router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests') urlpatterns = router.urls urlpatterns += url(r'sign_s3/$', sign_s3),",
"import DefaultRouter from django.conf.urls import url, include from .views import * router =",
"basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests')",
"basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests') urlpatterns = router.urls urlpatterns += url(r'sign_s3/$',",
"from rest_framework.routers import DefaultRouter from django.conf.urls import url, include from .views import *",
"CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet,",
"from django.conf.urls import url, include from .views import * router = DefaultRouter() router.register(r'users',",
"url, include from .views import * router = DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet,",
"DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes',",
"basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests') urlpatterns = router.urls",
"rest_framework.routers import DefaultRouter from django.conf.urls import url, include from .views import * router",
"<reponame>tomoya-kwansei/emonotateV2 from rest_framework.routers import DefaultRouter from django.conf.urls import url, include from .views import",
"= DefaultRouter() router.register(r'users', UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube')",
"ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes') router.register(r'requests', RequestViewSet, basename='requests') urlpatterns =",
"UserViewSet) router.register(r'curves', CurveViewSet, basename='curves') router.register(r'contents', ContentViewSet, basename='contents') router.register(r'youtube', YouTubeContentViewSet, basename='youtube') router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes')"
] |
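This much shorter row shuffles a Django REST Framework urls.py from the emonotateV2 project. Pieced back together from the fragments (the viewsets and sign_s3 arrive through the star import, so their definitions are assumed to live in .views), it reads roughly:

from rest_framework.routers import DefaultRouter
from django.conf.urls import url, include
from .views import *

router = DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'curves', CurveViewSet, basename='curves')
router.register(r'contents', ContentViewSet, basename='contents')
router.register(r'youtube', YouTubeContentViewSet, basename='youtube')
router.register(r'valuetypes', ValueTypeViewSet, basename='valuetypes')
router.register(r'requests', RequestViewSet, basename='requests')

urlpatterns = router.urls
urlpatterns += url(r'sign_s3/$', sign_s3),

The trailing comma on the last line is load-bearing: it wraps the url() object in a one-element tuple, so the += extends the list instead of raising TypeError. Writing urlpatterns += [url(r'sign_s3/$', sign_s3)] would say the same thing more plainly.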
[
"scoped fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert 'MAPDL",
"as this uses a module scoped fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7()",
"valid MAPDL install with CORBA valid_rver = ['182', '190', '191', '192', '193', '194',",
"elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and dmp",
"EXEC_FILE = None for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if",
"# configure shared memory parallel for VM additional_switches = '' if os.name ==",
"with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert 'MAPDL exited' in str(mapdl) with pytest.raises(MapdlExitedError):",
"None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes = ['corba'] # if",
"mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert 'MAPDL exited' in str(mapdl) with",
"and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] =",
"'193', '194', '195', '201'] EXEC_FILE = None for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)):",
"only for linux # modes.append('console') collect_ignore = [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\",",
"yield mapdl ### test exit ### # must be after yield as this",
"pyvista.OFF_SCREEN = True # check for a valid MAPDL install with CORBA valid_rver",
"shared memory parallel for Windows VM # configure shared memory parallel for VM",
"pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't show matplotlib figures yield",
"console only for linux # modes.append('console') collect_ignore = [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\")",
"== 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name == 'posix':",
"== 'posix': # console only for linux # modes.append('console') collect_ignore = [] if",
"get_ansys_bin import pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True # check for",
"pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes = ['corba'] # if os.name == 'posix':",
"# if os.name == 'posix': # console only for linux # modes.append('console') collect_ignore",
"= False # don't show matplotlib figures yield mapdl ### test exit ###",
"not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes = ['corba'] #",
"HAS_ANSYS = False else: HAS_ANSYS = EXEC_FILE is not None skip_no_ansys = pytest.mark.skipif(not",
"# console only for linux # modes.append('console') collect_ignore = [] if not HAS_ANSYS:",
"'192', '193', '194', '195', '201'] EXEC_FILE = None for rver in valid_rver: if",
"= '-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu",
"override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't show matplotlib figures yield mapdl",
"'posix': # console only for linux # modes.append('console') collect_ignore = [] if not",
"matplotlib figures yield mapdl ### test exit ### # must be after yield",
"install with CORBA valid_rver = ['182', '190', '191', '192', '193', '194', '195', '201']",
"params=modes) def mapdl(): # launch in shared memory parallel for Windows VM #",
"and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't",
"get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False else: HAS_ANSYS = EXEC_FILE is",
"= True # check for a valid MAPDL install with CORBA valid_rver =",
"additional_switches = '' if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches =",
"== 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and dmp mapdl =",
"on ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False",
"launch in shared memory parallel for Windows VM # configure shared memory parallel",
"additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't show matplotlib figures yield mapdl ### test",
"'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary",
"os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False else: HAS_ANSYS",
"os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True,",
"else: HAS_ANSYS = EXEC_FILE is not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS",
"parallel for VM additional_switches = '' if os.name == 'nt' and socket.gethostname() ==",
"figures yield mapdl ### test exit ### # must be after yield as",
"assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert 'MAPDL exited' in str(mapdl)",
"for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ:",
"None for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in",
"a valid MAPDL install with CORBA valid_rver = ['182', '190', '191', '192', '193',",
"pyansys.misc import get_ansys_bin import pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True #",
"import os import pytest import pyvista from pyansys.misc import get_ansys_bin import pyansys from",
"= get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False else: HAS_ANSYS = EXEC_FILE",
"must be after yield as this uses a module scoped fixture mapdl.exit() assert",
"Windows VM # configure shared memory parallel for VM additional_switches = '' if",
"'shm' # necessary on ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches)",
"VM additional_switches = '' if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches",
"test exit ### # must be after yield as this uses a module",
"'190', '191', '192', '193', '194', '195', '201'] EXEC_FILE = None for rver in",
"# check for a valid MAPDL install with CORBA valid_rver = ['182', '190',",
"socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm'",
"if os.name == 'posix': # console only for linux # modes.append('console') collect_ignore =",
"mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert 'MAPDL exited' in",
"HAS_ANSYS = EXEC_FILE is not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\")",
"valid_rver = ['182', '190', '191', '192', '193', '194', '195', '201'] EXEC_FILE = None",
"be after yield as this uses a module scoped fixture mapdl.exit() assert mapdl._exited",
"import MapdlExitedError pyvista.OFF_SCREEN = True # check for a valid MAPDL install with",
"HAS_ANSYS, reason=\"Requires ANSYS installed\") modes = ['corba'] # if os.name == 'posix': #",
"MAPDL install with CORBA valid_rver = ['182', '190', '191', '192', '193', '194', '195',",
"@pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch in shared memory parallel for Windows VM",
"CORBA valid_rver = ['182', '190', '191', '192', '193', '194', '195', '201'] EXEC_FILE =",
"# launch in shared memory parallel for Windows VM # configure shared memory",
"modes.append('console') collect_ignore = [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): #",
"= 'shm' # necessary on ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba',",
"== 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' #",
"not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch in shared memory parallel",
"'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT']",
"'201'] EXEC_FILE = None for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver)",
"is not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes = ['corba']",
"False # don't show matplotlib figures yield mapdl ### test exit ### #",
"for VM additional_switches = '' if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB':",
"VM # configure shared memory parallel for VM additional_switches = '' if os.name",
"socket import os import pytest import pyvista from pyansys.misc import get_ansys_bin import pyansys",
"mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't show matplotlib figures yield mapdl ###",
"= ['182', '190', '191', '192', '193', '194', '195', '201'] EXEC_FILE = None for",
"'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False else: HAS_ANSYS = EXEC_FILE is not None",
"import pytest import pyvista from pyansys.misc import get_ansys_bin import pyansys from pyansys.errors import",
"in os.environ: HAS_ANSYS = False else: HAS_ANSYS = EXEC_FILE is not None skip_no_ansys",
"reason=\"Requires ANSYS installed\") modes = ['corba'] # if os.name == 'posix': # console",
"this uses a module scoped fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert",
"EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False else: HAS_ANSYS =",
"valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False",
"import pyvista from pyansys.misc import get_ansys_bin import pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN",
"don't show matplotlib figures yield mapdl ### test exit ### # must be",
"pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert 'MAPDL exited' in str(mapdl) with pytest.raises(MapdlExitedError): mapdl.prep7()",
"pytest import pyvista from pyansys.misc import get_ansys_bin import pyansys from pyansys.errors import MapdlExitedError",
"ANSYS installed\") modes = ['corba'] # if os.name == 'posix': # console only",
"check for a valid MAPDL install with CORBA valid_rver = ['182', '190', '191',",
"# modes.append('console') collect_ignore = [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl():",
"# must be after yield as this uses a module scoped fixture mapdl.exit()",
"configure shared memory parallel for VM additional_switches = '' if os.name == 'nt'",
"'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE,",
"if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name",
"False else: HAS_ANSYS = EXEC_FILE is not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires",
"for linux # modes.append('console') collect_ignore = [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes)",
"rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS",
"= '' if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp'",
"# don't show matplotlib figures yield mapdl ### test exit ### # must",
"os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and dmp mapdl",
"def mapdl(): # launch in shared memory parallel for Windows VM # configure",
"in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS =",
"memory parallel for VM additional_switches = '' if os.name == 'nt' and socket.gethostname()",
"module scoped fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert",
"True # check for a valid MAPDL install with CORBA valid_rver = ['182',",
"installed\") modes = ['corba'] # if os.name == 'posix': # console only for",
"HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch in shared memory parallel for",
"parallel for Windows VM # configure shared memory parallel for VM additional_switches =",
"import socket import os import pytest import pyvista from pyansys.misc import get_ansys_bin import",
"additional_switches = '-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on",
"= ['corba'] # if os.name == 'posix': # console only for linux #",
"memory parallel for Windows VM # configure shared memory parallel for VM additional_switches",
"mapdl._show_matplotlib_figures = False # don't show matplotlib figures yield mapdl ### test exit",
"with CORBA valid_rver = ['182', '190', '191', '192', '193', '194', '195', '201'] EXEC_FILE",
"= EXEC_FILE is not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes",
"from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True # check for a valid MAPDL",
"ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False #",
"os import pytest import pyvista from pyansys.misc import get_ansys_bin import pyansys from pyansys.errors",
"= False else: HAS_ANSYS = EXEC_FILE is not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS,",
"pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True # check for a valid MAPDL install",
"= pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes = ['corba'] # if os.name ==",
"yield as this uses a module scoped fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError):",
"### test exit ### # must be after yield as this uses a",
"import pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True # check for a",
"= pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't show matplotlib figures",
"os.environ: HAS_ANSYS = False else: HAS_ANSYS = EXEC_FILE is not None skip_no_ansys =",
"necessary on ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures =",
"'191', '192', '193', '194', '195', '201'] EXEC_FILE = None for rver in valid_rver:",
"show matplotlib figures yield mapdl ### test exit ### # must be after",
"collect_ignore = [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch",
"'194', '195', '201'] EXEC_FILE = None for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE",
"modes = ['corba'] # if os.name == 'posix': # console only for linux",
"= None for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS'",
"pyvista from pyansys.misc import get_ansys_bin import pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN =",
"['182', '190', '191', '192', '193', '194', '195', '201'] EXEC_FILE = None for rver",
"uses a module scoped fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not",
"import get_ansys_bin import pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True # check",
"os.name == 'posix': # console only for linux # modes.append('console') collect_ignore = []",
"for Windows VM # configure shared memory parallel for VM additional_switches = ''",
"shared memory parallel for VM additional_switches = '' if os.name == 'nt' and",
"# necessary on ubuntu and dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures",
"if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE = get_ansys_bin(rver) if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False else:",
"exit ### # must be after yield as this uses a module scoped",
"[] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch in shared",
"a module scoped fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile)",
"['corba'] # if os.name == 'posix': # console only for linux # modes.append('console')",
"for a valid MAPDL install with CORBA valid_rver = ['182', '190', '191', '192',",
"mapdl(): # launch in shared memory parallel for Windows VM # configure shared",
"os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif os.name ==",
"mapdl ### test exit ### # must be after yield as this uses",
"collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch in shared memory parallel for Windows",
"EXEC_FILE is not None skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes =",
"mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't show matplotlib",
"if 'PYANSYS_IGNORE_ANSYS' in os.environ: HAS_ANSYS = False else: HAS_ANSYS = EXEC_FILE is not",
"'-smp' elif os.name == 'posix': os.environ['I_MPI_SHM_LMT'] = 'shm' # necessary on ubuntu and",
"fixture mapdl.exit() assert mapdl._exited with pytest.raises(RuntimeError): mapdl.prep7() assert not os.path.isfile(mapdl._lockfile) assert 'MAPDL exited'",
"if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch in shared memory",
"from pyansys.misc import get_ansys_bin import pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True",
"dmp mapdl = pyansys.launch_mapdl(EXEC_FILE, override=True, mode='corba', additional_switches=additional_switches) mapdl._show_matplotlib_figures = False # don't show",
"'' if os.name == 'nt' and socket.gethostname() == 'WIN-FRDMRVG7QAB': additional_switches = '-smp' elif",
"after yield as this uses a module scoped fixture mapdl.exit() assert mapdl._exited with",
"MapdlExitedError pyvista.OFF_SCREEN = True # check for a valid MAPDL install with CORBA",
"= [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def mapdl(): # launch in",
"'195', '201'] EXEC_FILE = None for rver in valid_rver: if os.path.isfile(get_ansys_bin(rver)): EXEC_FILE =",
"pyansys from pyansys.errors import MapdlExitedError pyvista.OFF_SCREEN = True # check for a valid",
"linux # modes.append('console') collect_ignore = [] if not HAS_ANSYS: collect_ignore.append(\"test_post.py\") @pytest.fixture(scope=\"session\", params=modes) def",
"in shared memory parallel for Windows VM # configure shared memory parallel for",
"skip_no_ansys = pytest.mark.skipif(not HAS_ANSYS, reason=\"Requires ANSYS installed\") modes = ['corba'] # if os.name",
"### # must be after yield as this uses a module scoped fixture"
] |
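The row above comes from a pyansys conftest.py whose centerpiece is a session-scoped, parametrized yield fixture: MAPDL is launched once, every test shares the instance, and the exit assertions run in the teardown half after the yield. A self-contained sketch of that pattern, using a hypothetical FakeMapdl in place of pyansys.launch_mapdl():

import pytest

class FakeMapdl:
    """Hypothetical stand-in for the object pyansys.launch_mapdl() returns."""
    def __init__(self):
        self._exited = False

    def prep7(self):
        if self._exited:
            raise RuntimeError("MAPDL exited")

    def exit(self):
        self._exited = True

@pytest.fixture(scope="session", params=["corba"])
def mapdl(request):
    # Real code: pyansys.launch_mapdl(EXEC_FILE, mode=request.param, ...)
    mapdl = FakeMapdl()
    yield mapdl  # every test in the session shares this instance
    # Teardown doubles as the exit test; it must come after the yield
    # because a session-scoped fixture finalizes only once, at the end.
    mapdl.exit()
    assert mapdl._exited
    with pytest.raises(RuntimeError):
        mapdl.prep7()

def test_prep7_while_alive(mapdl):
    mapdl.prep7()  # succeeds: the session has not exited yet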
[
"data from orginial Cranfield collection and # implement the BM25 alogrithm information retrieval;",
"at N) are applied. # Tested under Python 3.5 on Ubuntu 16.04. #",
"in punctuation) # Remove all punctuations except full stops and hyphens. args =",
"= math.sqrt(length) # Calculate the previous document length and start a new one.",
"for line in fp: fields = line.split() query_ID = int(fields[0]) pair = (int(fields[1]),",
"(line 17287), \"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\" (line 516) will not be",
"\"r\") as fp: for line in fp: fields = line.split() query_ID = int(fields[0])",
"except full stops and hyphens. args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index",
"Also, treat two consecutive hyphens as a space. for term in line.split(): #",
"Update and go to next line immediately. elif section in CONTENTS: line =",
"frequency * (1.0 + K) / (frequency + K * ((1.0 - B)",
"and go to next line immediately. elif section in CONTENTS: line = line.translate(removing_punctuation_map)",
"length += 1.0 # Treat a compound word as one word; words in",
"\"BM25 evaluation result output in lines of 3-tuples (query ID, document ID, and",
"here: https://docs.python.org/3/library/argparse.html import readline # Used to create a typing history buffer for",
"dots. # And similarly, phrases like \"m. i. t.\" (line 36527) and #",
"the like. import string # Used to do some regex operations. import math",
"on BM25 to calculate similarities. \"\"\" similarities = [] for document_ID in range(1,",
"line in fp: stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\" Build vectors of each",
"(-1, 1, 2, 3, 4), which means all # documents in it will",
"in desceding order. similarities = sorted(similarities, key = lambda x : x[1], reverse",
"document_ID in relevance_set: if document_ID in retrieval_set: appearance_times += 1 recall += appearance_times",
"(Discounted Cumulated Gain). dcg = [gain_vector[0]] # Put the first item in `dcg`.",
"for i in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1])",
"relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x : x[1]) return relevance_scores def make_query_results():",
"= document_lengths[document] / average_length # Now document_lengths stores a normalised length for each",
"similarities = [] for document_ID in range(1, nums_of_documents + 1): # Document ID",
"False def get_arguments(): parser = argparse.ArgumentParser(description = \"A script used to build BM25",
"28459) or \"a52b06\" (line 25717). if is_valid(element): add_new_word(element) # Filter out all pure",
"if user_query == USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results for query \" +",
"= 0 for document_ID in retrieval_set: if document_ID in relevance_set: appearance_times += 1",
"followed by \".I\", # since they may not be consecutive. num_of_documents += 1",
"Document ID begins from 1. similarity = 0.0 for term in query: if",
"in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf = math.log((nums_of_documents - n_i",
"Build a structural data from orginial Cranfield collection and # implement the BM25",
"relevance_scores[query_ID].append(pair) # It assumes no repetition of document IDs for each query. else:",
"-*- # Description: Build a structural data from orginial Cranfield collection and #",
"in retrieval_set: appearance_times += 1 recall += appearance_times / len(relevance_set) recall = recall",
"chosen by default if it is not specified\") parser.add_argument(\"-o\", required = False, nargs",
"extra empty string is created # and makes term_split look like [\"sub\", \"\"].",
"# And finally, yield at most `n` results for each query. yield query_ID,",
"\"\" and word not in stop_words and not is_number(word): return True else: return",
"to parse program arguments. # More details are here: https://docs.python.org/3/library/argparse.html import readline #",
"punctuations except full stops and hyphens. args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25",
"load_stop_words() punctuation = string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \")",
"each query. USER_STOP_WORD = \"QUIT\" # When user types `USER_STOP_WORD`, the program ends;",
"len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\" It yields a list of NDCGs at",
"print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}:",
"Times}}. document_lengths = {} average_length = 0.0 num_of_documents = 0 with open(DOCUMENT_PATH, \"r\")",
"string is valid. Used to process documents and queries. \"\"\" if word !=",
"for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i in",
"[] for pair in query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) #",
"immediately. elif section in CONTENTS: line = line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \")",
"if term in term_vectors and document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i =",
"relevance score larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default value is 4",
"(line 20026) will be converted into integers by just removing dots. # And",
"# A helper function to add a new word in `term_vectors`. if word",
"length and start a new one. # The empty entry for document 0",
"3-tuples (query ID, document ID, and its rank [1 - 15]) form; if",
"# Labels in `cran.all.1400` and `cranqrel` text files. ID = \".I\" TITLE =",
"not \"3\". # Calculate the last length since Cranfield collection does not have",
"print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)),",
"not specified\") parser.add_argument(\"-o\", required = False, nargs = \"?\", const = EVALUATION_PATH, metavar",
"term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if len(term_split) > 1: for",
"\"m.i.t.\" (line 1222) / \"u.s.a.\" (line 32542) into \"mit\" / \"usa\". # In",
"rank)) rank += 1 return query_results def make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set",
"import string # Used to do some regex operations. import math import os",
"term in line.split(): # Split according to whitespace characters and deal with two",
"document_lengths[document_ID] document_ID += 1 # Ignore original document IDs, which is the numbers",
"word in `query_terms`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word =",
"query_results[query_ID]: if pair[0] in relevance_set: appearance_times += 1 current_map += appearance_times / pair[1]",
"STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH",
"Add the last entry. del query_list[0] # Skip the first one. return query_list",
"+ K) / (frequency + K * ((1.0 - B) + B *",
"relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set = set() for",
"length in document_lengths.items()} for term, vector in term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times",
"last entry. del query_list[0] # Skip the first one. return query_list def bm25_similarities(query):",
"+= 1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming = {} term_vectors = {} #",
"relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no repetition of document IDs for each query.",
"{0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean Average Precision:",
"BM25 model and relative evaluation methods. If the index JSON file is not",
"in relevance_scores: # Sort pairs in ascending order for each query; the less",
"= {} term_vectors = {} # `term_vectors` structure: {[Key] Term : [Value] {[Key]",
"term_vectors, document_lengths = json.load(fp) # Warning: unlike Python, `dict` type in JSON cannot",
"+ 0.5) / (n_i + 0.5), 2) similarity += frequency * (1.0 +",
"term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times in vector.items()} nums_of_documents =",
"similarities. \"\"\" similarities = [] for document_ID in range(1, nums_of_documents + 1): #",
"NDCG at N) are applied. # Tested under Python 3.5 on Ubuntu 16.04.",
"len(relevance_set) mean_average_precision = mean_average_precision / len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\" It yields",
"else: return similarities def manual_mode(): \"\"\" When in `manual` mode, the function will",
"if is_valid(element): add_new_word(element) return query_terms def process_queries(): with open(QUERY_PATH, \"r\") as fp: query_list",
"\"\"\" precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set =",
"query_ID in relevance_scores: # Sort pairs in ascending order for each query; the",
"(Ret). retrieval_set = set() for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision():",
"query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original ranking scores",
"else: gain_vector.append(0) # Step two: DCG (Discounted Cumulated Gain). dcg = [gain_vector[0]] #",
"pairs (Document ID, Similarity) based on BM25 to calculate similarities. \"\"\" similarities =",
"for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 for pair in",
"idf if similarity > 0.0: # Ignore the one with similarity score 0.",
"\"r\") as fp: query_list = {} query = [] query_ID = 0 for",
"# Skip the document with index 0 from document length vector. del document_lengths[0]",
"\"\"\" It yields a list of NDCGs at up to N of each",
"scores to # NDCG-friendly ones. # Constants used in BM25 model. K =",
"document_ID += 1 # Ignore original document IDs, which is the numbers followed",
"larger than `MOST_RELEVANT`. N = 10 def is_number(word): \"\"\" A helper function to",
"for pair in query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert",
"if pair[0] in relevance_set: appearance_times += 1 current_map += appearance_times / pair[1] mean_average_precision",
"current_section continue # Update and go to next line immediately. elif section in",
"in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in query_terms:",
"[] query_ID += 1 # Ignore original query IDs, which is the numbers",
"# Step two: DCG (Discounted Cumulated Gain). dcg = [gain_vector[0]] # Put the",
"query_list[query_ID] = query query = [] query_ID += 1 # Ignore original query",
"in `cran.all.1400` and `cranqrel` text files. ID = \".I\" TITLE = \".T\" AUTHORS",
"average_length = (document_lengths[document_ID] + average_length) / num_of_documents for document in document_lengths.keys(): document_lengths[document] =",
"(type \\\"QUIT\\\" to terminate): \") if user_query == USER_STOP_WORD: break query_terms = process_single_query(user_query)",
"process_single_query(user_query) print(\"Results for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for result",
"return False def is_valid(word): \"\"\" A helper function to check if a string",
"Cranfield collection does not have ending symbols. document_lengths[document_ID] = math.sqrt(length) # Skip the",
"/ len(query_results) return recall def p_at_n(n): \"\"\" It calculates arithmetic mean of precisions",
"fp: for line in fp: stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\" Build vectors",
"num_of_documents for document in document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length # Now document_lengths",
"pairs of original words and stemmed words are returned. \"\"\" def add_new_word(word): #",
"print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg), end = \"\\n\") if __name__ == \"__main__\":",
"index JSON file and exit. print(\"[Generating the index file.]\") with open(INDEX_PATH, \"w\") as",
"\"\") if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if len(term_split) > 1: for element",
"at up to N of each query separately. \"\"\" for query_ID, score_list in",
"a new one. # The empty entry for document 0 is also created",
"add_new_word(element) return query_terms def process_queries(): with open(QUERY_PATH, \"r\") as fp: query_list = {}",
"libraries that places locally. import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH",
"# Warning: unlike Python, `dict` type in JSON cannot have `int` key, #",
"516) will not be handled as expected. # All float-point numbers like \"3.2x10\"",
"Date created: 2018-05-07 # Here are some Python standard modules used in the",
"+= 1 # Ignore original document IDs, which is the numbers followed by",
"means there is no hyphen in this word. # There may exist a",
"ascending order for each query; the less the relevance # score is, the",
"are returned for each query. USER_STOP_WORD = \"QUIT\" # When user types `USER_STOP_WORD`,",
"exactly the same structure and length as `relevance_scores`. for query_ID in query_list: rank",
"# Add the last entry. del query_list[0] # Skip the first one. return",
"= line.split() query_ID = int(fields[0]) pair = (int(fields[1]), int(fields[2])) if query_ID in relevance_scores:",
"= {} # `query_results` structure: {[KEY] query ID : [Value] [(Document ID, Relevance",
"[(Document ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as fp: for line in fp:",
"query_terms = [] query = query.strip() query = query.translate(removing_punctuation_map) query = query.replace(\"--\", \"",
"term = term.replace(\".\", \"\") # Remove full stops in one term, used to",
"(Rel). relevance_set = set() for pair in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: #",
"arithmetic mean of precisions for all queries. \"\"\" precision = 0.0 for query_ID",
"lambda x : x[1], reverse = True) if len(similarities) > MOST_RELEVANT: return similarities[0",
"if a string is valid. Used to process documents and queries. \"\"\" if",
"with open(STOP_WORDS_PATH, \"r\") as fp: for line in fp: stop_words.add(line.rstrip()) return stop_words def",
"the program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter out ones",
"(line 31509), # or \"studies.dash\" (line 516) will not be handled as expected.",
"and\" (line 14632), which causes an extra empty string is created # and",
"is equal to `N`, precision will be the same as P at N",
"rank = 1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\")",
"= \"BM25 evaluation result output in lines of 3-tuples (query ID, document ID,",
"from 001. average_length += document_lengths[document_ID] document_ID += 1 # Ignore original document IDs,",
"0 for pair in query_results[query_ID]: if pair[0] in relevance_set and pair[1] <= n:",
"for document 0 is also created although # in Cranfield collection, document ID",
"stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms = [] query = query.strip() query =",
"# `term_vectors` structure: {[Key] Term : [Value] {[Key] Document ID : [Value] Appearance",
"as a space. for term in line.split(): # Split according to whitespace characters",
"for line in fp: stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\" Build vectors of",
"Gain) at N. ndcg_at_n = [] for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] /",
"= string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \") for character",
"= query.replace(\"--\", \" \") for term in query.split(): term = term.replace(\".\", \"\").lower() compound",
"if len(term_split) > 1: for element in term_split: if is_valid(element): add_new_word(element) return query_terms",
"compound word as one word; words in `AUTHORS` # and `BIBLIOGRAPHY` section will",
"single line text. Used by `process_queries` function and `manual` mode. \"\"\" def add_new_word(word):",
"# documents in it will be reserved. RELEVANCE_SCORE_FIX = 5 # It is",
"a structural data from orginial Cranfield collection and # implement the BM25 alogrithm",
"Python, `dict` type in JSON cannot have `int` key, # therefore a conversion",
"int(fields[0]) pair = (int(fields[1]), int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes",
"stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in query_terms: query_terms.append(stemmed_word)",
"average_length = 0.0 num_of_documents = 0 with open(DOCUMENT_PATH, \"r\") as fp: document_ID =",
"\"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels in",
"stemming = {} term_vectors = {} # `term_vectors` structure: {[Key] Term : [Value]",
"= precision / len(query_results) return precision def recall(): \"\"\" It calculates arithmetic mean",
"recall(): \"\"\" It calculates arithmetic mean of recalls for all queries. \"\"\" recall",
"== \"manual\": manual_mode() elif args.m == \"evaluation\": relevance_scores = load_relevance_scores() query_results = make_query_results()",
"\"\") if is_valid(compound): add_new_word(compound) if section == WORDS: length += 1.0 # Treat",
"manual_mode(): \"\"\" When in `manual` mode, the function will not end until user",
"a term with an ending hyphens like # \"sub- and\" (line 14632), which",
"{[KEY] query ID : [Value] [(Document ID, Relevance Score)]}, which is exactly the",
"BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80",
"# and makes term_split look like [\"sub\", \"\"]. for element in term_split: #",
"range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1]) # Step four:",
"key, # therefore a conversion is of necessity. document_lengths = {int(ID) : length",
"word. # There may exist a term with an ending hyphens like #",
"current_section = line[0 : 2] if current_section in LABELS: if current_section == ID:",
"idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) > n: # And finally, yield at",
"have `int` key, # therefore a conversion is of necessity. document_lengths = {int(ID)",
"\"\"\" for query_ID, score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list) #",
"porter.PorterStemmer() stop_words = load_stop_words() punctuation = string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map =",
"query: if term in term_vectors and document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i",
"like [\"sub\", \"\"]. for element in term_split: # Deal with each part of",
"relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 for pair in query_results[query_ID]: if pair[0]",
"in query_results[query_ID]: if pair[0] in relevance_set and pair[1] <= n: appearance_times += 1",
"of necessity. document_lengths = {int(ID) : length for ID, length in document_lengths.items()} for",
"make_retrieval_set(query_ID) appearance_times = 0 for document_ID in relevance_set: if document_ID in retrieval_set: appearance_times",
"a new word in `term_vectors`. if word not in stemming: stemming[word] = stemmer.stem(word)",
"and calculate lengths of each documents. Also a dictionary containing pairs of original",
"query = query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \") for term in query.split(): term",
"based on BM25 to calculate similarities. \"\"\" similarities = [] for document_ID in",
"ID : [Value] Appearance Times}}. document_lengths = {} average_length = 0.0 num_of_documents =",
"try: int(word) return True except ValueError: return False def is_valid(word): \"\"\" A helper",
"add_new_word(word): # A helper function to add a new word in `query_terms`. if",
"each part of compound words like \"two-step\" (line 38037) or # type names",
"in line.split(): # Split according to whitespace characters and deal with two special",
"ID : [Value] [(Document ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as fp: for",
"will be reserved. RELEVANCE_SCORE_FIX = 5 # It is a number used as",
"will not end until user types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH)",
"get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\") with open(INDEX_PATH, \"r\") as fp:",
"0.5), 2) similarity += frequency * (1.0 + K) / (frequency + K",
"mode. # More details are here: https://docs.python.org/3/library/readline.html import json # Used to create",
"i. t.\" (line 36527) and # \"i. e.\" (line 11820) will be ignored.",
"+ 1, 2) + idcg[-1]) # Step four: NDCG (Normalised Discounted Cumulated Gain)",
"= 0 with open(DOCUMENT_PATH, \"r\") as fp: document_ID = 0 length = 0.0",
"= 1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\") rank",
"{1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for",
"query = query.replace(\"--\", \" \") for term in query.split(): term = term.replace(\".\", \"\").lower()",
"query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \") for term in query.split(): term = term.replace(\".\",",
"in fp: fields = line.split() query_ID = int(fields[0]) pair = (int(fields[1]), int(fields[2])) if",
"There may exist a term with an ending hyphens like # \"sub- and\"",
"= json.load(fp) # Warning: unlike Python, `dict` type in JSON cannot have `int`",
"(line 516) will not be handled as expected. # All float-point numbers like",
"dict(score_list) # Convert a list of pairs to dictionary for convienence. # Step",
"average_length # Now document_lengths stores a normalised length for each document. return stemming,",
"= 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 current_map",
"evaluation methods (precision, recall, MAP, P at N and # NDCG at N)",
"in `bm25_similarities()` function. if args.m == \"manual\": manual_mode() elif args.m == \"evaluation\": relevance_scores",
"except ValueError: return False def is_valid(word): \"\"\" A helper function to check if",
"if document_ID in retrieval_set: appearance_times += 1 recall += appearance_times / len(relevance_set) recall",
"collection. # N.B.: `N` cannot be larger than `MOST_RELEVANT`. N = 10 def",
"retrieval_set = set() for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\"",
"25717). if is_valid(element): add_new_word(element) # Filter out all pure integers; for example, for",
"pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i in range(1,",
"else: yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\")",
"all punctuations except full stops and hyphens. args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading",
"len(term_vectors[term]) idf = math.log((nums_of_documents - n_i + 0.5) / (n_i + 0.5), 2)",
"[Value] [(Document ID, Relevance Score)]}, which is exactly the same structure and length",
"file and exit. print(\"[Generating the index file.]\") with open(INDEX_PATH, \"w\") as fp: json.dump(process_documents(),",
"larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default value is 4 (-1, 1,",
"# `query_results` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}, which",
"directory and extra arguments will be ignored in this case\") parser.add_argument(\"-m\", required =",
"= process_single_query(user_query) print(\"Results for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for",
"[] for document_ID in range(1, nums_of_documents + 1): # Document ID begins from",
"document_lengths[document_ID])) * idf if similarity > 0.0: # Ignore the one with similarity",
"hyphenated compounds. term = term.replace(\".\", \"\") # Remove full stops in one term,",
"ending symbols. document_lengths[document_ID] = math.sqrt(length) # Skip the document with index 0 from",
"one with similarity score 0. pair = (document_ID, similarity) similarities.append(pair) # Sort results",
"Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default # width of terminal window. user_query",
"print(\"Rank\\tID\\tScore\") rank = 1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end =",
"to do some regex operations. import math import os # Here are some",
"4), which means all # documents in it will be reserved. RELEVANCE_SCORE_FIX =",
"is not specified\") parser.add_argument(\"-o\", required = False, nargs = \"?\", const = EVALUATION_PATH,",
"yield at most `n` results for each query. yield query_ID, ndcg_at_n[0 : n]",
"stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms",
"`manual` mode. # More details are here: https://docs.python.org/3/library/readline.html import json # Used to",
"WORDS: length += 1.0 # Treat a compound word as one word; words",
"a conversion is of necessity. document_lengths = {int(ID) : length for ID, length",
"Description: Build a structural data from orginial Cranfield collection and # implement the",
"query_ID = 0 for line in fp: current_section = line[0 : 2] if",
"precision / len(query_results) return precision def recall(): \"\"\" It calculates arithmetic mean of",
"and # \"i. e.\" (line 11820) will be ignored. # \"r.m.s.\" (line 20241)",
"sorted(similarities, key = lambda x : x[1], reverse = True) if len(similarities) >",
"to create a human-readable JSON file for index information and the like. import",
"document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf = math.log((nums_of_documents -",
"as minuend to convert original relevance scores to # NDCG-friendly ones. # Constants",
"# therefore a conversion is of necessity. document_lengths = {int(ID) : length for",
"last length since Cranfield collection does not have ending symbols. document_lengths[document_ID] = math.sqrt(length)",
"document length vector. del document_lengths[0] average_length = (document_lengths[document_ID] + average_length) / num_of_documents for",
"returns possible relevant documents for each query based on BM25 model. \"\"\" query_list",
"retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\" It calculates arithmetic mean of precisions for",
"ID: query_list[query_ID] = query query = [] query_ID += 1 # Ignore original",
"`RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default value is 4 (-1, 1, 2, 3,",
"although # in Cranfield collection, document ID begins from 001. average_length += document_lengths[document_ID]",
"os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\") with open(INDEX_PATH, \"r\") as fp: stemming, term_vectors,",
"in stop_words and not is_number(word): return True else: return False def get_arguments(): parser",
"\"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\" (line 516) will not",
"add_new_word(word): # A helper function to add a new word in `term_vectors`. if",
"ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg), end = \"\\n\") if __name__ ==",
"saved, but not \"3\". # Calculate the last length since Cranfield collection does",
"query_terms def process_queries(): with open(QUERY_PATH, \"r\") as fp: query_list = {} query =",
"pair[1] mean_average_precision += current_map / len(relevance_set) mean_average_precision = mean_average_precision / len(query_results) return mean_average_precision",
"1.0 B = 0.75 # A constant used in Precision at N and",
"EVALUATION_PATH = \"evaluation_output.txt\" # Labels in `cran.all.1400` and `cranqrel` text files. ID =",
"document_lengths = {int(ID) : length for ID, length in document_lengths.items()} for term, vector",
"NDCG at N. # If `MOST_RELEVANT` is equal to `N`, precision will be",
"- 15]) form; if `FILE NAME` is not given, the default output file",
"\"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\"",
"until user types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH`",
"+= appearance_times / pair[1] mean_average_precision += current_map / len(relevance_set) mean_average_precision = mean_average_precision /",
"item in `term_split`, which means there is no hyphen in this word. #",
"# Step one: gain vector. gain_vector = [] for pair in query_results[query_ID]: if",
"length as `relevance_scores`. for query_ID in query_list: rank = 1 query_results[query_ID] = []",
"history buffer for `manual` mode. # More details are here: https://docs.python.org/3/library/readline.html import json",
"character in punctuation) # Remove all punctuations except full stops and hyphens. args",
"is the numbers followed # by \".I\", since they are not consecutive. if",
"are less than or equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def",
"retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in relevance_set: if document_ID in",
"print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\")",
"# type names like \"75s-t6\" (line 28459) or \"a52b06\" (line 25717). if is_valid(element):",
"1222) / \"u.s.a.\" (line 32542) into \"mit\" / \"usa\". # In the meantime,",
"line text. Used by `process_queries` function and `manual` mode. \"\"\" def add_new_word(word): #",
"integers; for example, for \"f8u-3\" (line 35373), # both \"f8u\" and \"f8u3\" will",
"__name__ == \"__main__\": stemmer = porter.PorterStemmer() stop_words = load_stop_words() punctuation = string.punctuation[0 :",
"term and calculate lengths of each documents. Also a dictionary containing pairs of",
"- B) + B * document_lengths[document_ID])) * idf if similarity > 0.0: #",
"1 precision += appearance_times / len(retrieval_set) precision = precision / len(query_results) return precision",
"pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original ranking scores to NDCG-usable",
"0.75 # A constant used in Precision at N and NDCG at N.",
"integer. Used to process documents and queries. \"\"\" try: int(word) return True except",
"len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1]) # Step three: IDCG",
"choices = [\"manual\", \"evaluation\"], default = \"manual\", help = \"mode selection; `manual` mode",
"del document_lengths[0] average_length = (document_lengths[document_ID] + average_length) / num_of_documents for document in document_lengths.keys():",
"words like \"two-step\" (line 38037) or # type names like \"75s-t6\" (line 28459)",
"= \".W\" LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY,",
"with similarity score 0. pair = (document_ID, similarity) similarities.append(pair) # Sort results in",
"in query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original ranking",
"Python 3.5 on Ubuntu 16.04. # Author: '(<NAME>.) # Date created: 2018-05-07 #",
"relevance_scores: # Sort pairs in ascending order for each query; the less the",
"Similarity) based on BM25 to calculate similarities. \"\"\" similarities = [] for document_ID",
"\"\"\" It returns a descending list with at most top `MOST_RELEVANT` pairs (Document",
"# in Cranfield collection, document ID begins from 001. average_length += document_lengths[document_ID] document_ID",
"precisions for all queries. \"\"\" precision = 0.0 for query_ID in relevance_scores: relevance_set",
"`python3 bm25.py` to generate one in the working directory and extra arguments will",
"query_ID, pair_list in query_results.items(): for pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1]))",
"result[0], str(result[1])), end = \"\\n\") rank += 1 def load_relevance_scores(): relevance_scores = {}",
"two: DCG (Discounted Cumulated Gain). dcg = [gain_vector[0]] # Put the first item",
"up to N of each query separately. \"\"\" for query_ID, score_list in relevance_scores.items():",
"since they are not consecutive. if current_section == WORDS: section = current_section continue",
"not be counted. term_split = term.split(\"-\") if len(term_split) > 1: # If only",
"current_map / len(relevance_set) mean_average_precision = mean_average_precision / len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\"",
"`MOST_RELEVANT`. N = 10 def is_number(word): \"\"\" A helper function to check if",
"to check if a string is valid. Used to process documents and queries.",
"is created # and makes term_split look like [\"sub\", \"\"]. for element in",
"line immediately. elif section in CONTENTS: line = line.translate(removing_punctuation_map) line = line.replace(\"--\", \"",
"information and the like. import string # Used to do some regex operations.",
"of precisions for all queries. \"\"\" precision = 0.0 for query_ID in relevance_scores:",
"if section == WORDS: length += 1.0 # Treat a compound word as",
"\"..e.g.at\" (line 17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\"",
"term in query.split(): term = term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\") if is_valid(compound):",
"i in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1]) #",
"will not be handled as expected. # All float-point numbers like \"3.2x10\" (line",
"Convert a list of pairs to dictionary for convienence. # Step one: gain",
"structure: {[Key] Term : [Value] {[Key] Document ID : [Value] Appearance Times}}. document_lengths",
"and makes term_split look like [\"sub\", \"\"]. for element in term_split: # Deal",
"be converted into integers by just removing dots. # And similarly, phrases like",
"model. \"\"\" query_list = process_queries() query_results = {} # `query_results` structure: {[KEY] query",
"ID = \".I\" TITLE = \".T\" AUTHORS = \".A\" BIBLIOGRAPHY = \".B\" WORDS",
"appearance_times = 0 for document_ID in retrieval_set: if document_ID in relevance_set: appearance_times +=",
"current_section continue elif section in CONTENTS: if query == []: query = process_single_query(line)",
"in CONTENTS: if query == []: query = process_single_query(line) else: query += process_single_query(line)",
"if is_valid(element): add_new_word(element) # Filter out all pure integers; for example, for \"f8u-3\"",
"\"QUIT\" # When user types `USER_STOP_WORD`, the program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD",
"(precision, recall, MAP, P at N and # NDCG at N) are applied.",
"= \"?\", const = EVALUATION_PATH, metavar = \"FILE NAME\", help = \"BM25 evaluation",
"full stops in one term, used to convert abbreviations # like \"m.i.t.\" (line",
"Used to create a typing history buffer for `manual` mode. # More details",
"None: # If `-o` option is available. with open(args.o, \"w\") as fp: for",
"index information and the like. import string # Used to do some regex",
"0 for line in fp: current_section = line[0 : 2] if current_section in",
"stop_words = load_stop_words() punctuation = string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character),",
"query separately. \"\"\" for query_ID, score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict =",
"alogrithm information retrieval; # also 5 evaluation methods (precision, recall, MAP, P at",
"compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if len(term_split) >",
"= \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end",
"idf = math.log((nums_of_documents - n_i + 0.5) / (n_i + 0.5), 2) similarity",
"is not given, the default output file name is `evaluation_output.txt`\") return parser.parse_args() def",
"document ID, and its rank [1 - 15]) form; if `FILE NAME` is",
"make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set = set() for pair in query_results[query_ID]: retrieval_set.add(pair[0])",
"is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if len(term_split) > 1: for element in term_split:",
"process_single_query(line) query_list[query_ID] = query # Add the last entry. del query_list[0] # Skip",
"helper function to add a new word in `term_vectors`. if word not in",
"collection, document ID begins from 001. average_length += document_lengths[document_ID] document_ID += 1 #",
"score_list_dict = dict(score_list) # Convert a list of pairs to dictionary for convienence.",
"extra arguments will be ignored in this case\") parser.add_argument(\"-m\", required = False, choices",
"used as minuend to convert original relevance scores to # NDCG-friendly ones. #",
"if current_section == WORDS: section = current_section continue elif section in CONTENTS: if",
"each query. yield query_ID, ndcg_at_n[0 : n] else: yield query_ID, ndcg_at_n def print_evaluation_results():",
"like \"m. i. t.\" (line 36527) and # \"i. e.\" (line 11820) will",
"and queries. \"\"\" if word != \"\" and word not in stop_words and",
"n: appearance_times += 1 p_at_n += appearance_times / n p_at_n = p_at_n /",
"not in query_terms: query_terms.append(stemmed_word) query_terms = [] query = query.strip() query = query.translate(removing_punctuation_map)",
"> 1: for element in term_split: if is_valid(element): add_new_word(element) return query_terms def process_queries():",
"some regex operations. import math import os # Here are some Python libraries",
"stemming, term_vectors, document_lengths = json.load(fp) # Warning: unlike Python, `dict` type in JSON",
"three: IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector = [] for pair in score_list:",
"in one term, used to convert abbreviations # like \"m.i.t.\" (line 1222) /",
"continue # Update and go to next line immediately. elif section in CONTENTS:",
"= term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if len(term_split) > 1:",
": 12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \") for character in punctuation)",
"both \"f8u\" and \"f8u3\" will be saved, but not \"3\". # Calculate the",
"stops in one term, used to convert abbreviations # like \"m.i.t.\" (line 1222)",
"\"3.2x10\" (line 18799), \"79.5degree\" # (line 20026) will be converted into integers by",
"# N.B.: `N` cannot be larger than `MOST_RELEVANT`. N = 10 def is_number(word):",
"mean_average_precision += current_map / len(relevance_set) mean_average_precision = mean_average_precision / len(query_results) return mean_average_precision def",
"True) if len(similarities) > MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else: return similarities def",
"Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query",
"length = 0.0 for line in fp: current_section = line[0 : 2] if",
"BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80 # It decides the length",
"average precision for all queries. \"\"\" mean_average_precision = 0.0 for query_ID in relevance_scores:",
"int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no repetition of document",
"import math import os # Here are some Python libraries that places locally.",
"stop_words and not is_number(word): return True else: return False def get_arguments(): parser =",
"end = \"\\n\") rank += 1 def load_relevance_scores(): relevance_scores = {} # `relevance_scores`",
"* idf if similarity > 0.0: # Ignore the one with similarity score",
"mean_average_precision(): \"\"\" It calculates mean average precision for all queries. \"\"\" mean_average_precision =",
"for element in term_split: if is_valid(element): add_new_word(element) return query_terms def process_queries(): with open(QUERY_PATH,",
"for term in query: if term in term_vectors and document_ID in term_vectors[term]: frequency",
"gain vector. gain_vector = [] for pair in query_results[query_ID]: if pair[0] in relevance_set:",
"in query: if term in term_vectors and document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID]",
"\"r\") as fp: stemming, term_vectors, document_lengths = json.load(fp) # Warning: unlike Python, `dict`",
"in range(1, nums_of_documents + 1): # Document ID begins from 1. similarity =",
"expected. # All float-point numbers like \"3.2x10\" (line 18799), \"79.5degree\" # (line 20026)",
"Step four: NDCG (Normalised Discounted Cumulated Gain) at N. ndcg_at_n = [] for",
"15 # At most top `MOST_RELEVANT` results are returned for each query. USER_STOP_WORD",
"original words and stemmed words are returned. \"\"\" def add_new_word(word): # A helper",
"= 10 def is_number(word): \"\"\" A helper function to check if a string",
"and # NDCG at N) are applied. # Tested under Python 3.5 on",
"A constant used in Precision at N and NDCG at N. # If",
"convert abbreviations # like \"m.i.t.\" (line 1222) / \"u.s.a.\" (line 32542) into \"mit\"",
"# by \".I\", since they are not consecutive. if current_section == WORDS: section",
"parser.add_argument(\"-o\", required = False, nargs = \"?\", const = EVALUATION_PATH, metavar = \"FILE",
"\"\\n\") rank += 1 def load_relevance_scores(): relevance_scores = {} # `relevance_scores` structure: {[KEY]",
"query; the less the relevance # score is, the more relevant the document",
"(1.0 + K) / (frequency + K * ((1.0 - B) + B",
"queries. \"\"\" try: int(word) return True except ValueError: return False def is_valid(word): \"\"\"",
"Cumulated Gain). ideal_gain_vector = [] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg",
"it will be reserved. RELEVANCE_SCORE_FIX = 5 # It is a number used",
"def make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set = set() for pair in relevance_scores[query_ID]:",
"\") # Also, treat two consecutive hyphens as a space. for term in",
"mean_average_precision def ndcg_at_n(n): \"\"\" It yields a list of NDCGs at up to",
"# The empty entry for document 0 is also created although # in",
"# Used to create a typing history buffer for `manual` mode. # More",
"the document with index 0 from document length vector. del document_lengths[0] average_length =",
"INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels in `cran.all.1400` and `cranqrel` text",
"all queries. \"\"\" recall = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID)",
"def bm25_similarities(query): \"\"\" It returns a descending list with at most top `MOST_RELEVANT`",
"n: # And finally, yield at most `n` results for each query. yield",
"fp: document_ID = 0 length = 0.0 for line in fp: current_section =",
"retrieval_set: appearance_times += 1 recall += appearance_times / len(relevance_set) recall = recall /",
"similarity = 0.0 for term in query: if term in term_vectors and document_ID",
"# Convert original ranking scores to NDCG-usable scores. else: gain_vector.append(0) # Step two:",
"the default # width of terminal window. user_query = input(\"Enter query (type \\\"QUIT\\\"",
"parser.parse_args() def load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH, \"r\") as fp: for line",
"creates an index JSON file and exit. print(\"[Generating the index file.]\") with open(INDEX_PATH,",
"stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in term_vectors: term_vectors[stemmed_word]",
"as P at N for Cranfield collection. # N.B.: `N` cannot be larger",
"converted to an integer. Used to process documents and queries. \"\"\" try: int(word)",
"= 0.0 section = current_section continue # Update and go to next line",
"= {} # `relevance_scores` structure: {[KEY] query ID : [Value] [(Document ID, Relevance",
"not in term_vectors: term_vectors[stemmed_word] = {} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1",
"a dictionary containing pairs of original words and stemmed words are returned. \"\"\"",
"document_lengths[document] / average_length # Now document_lengths stores a normalised length for each document.",
"relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert a list of pairs to",
"= len(document_lengths) # It is used in `bm25_similarities()` function. if args.m == \"manual\":",
"for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0",
"the previous document length and start a new one. # The empty entry",
"If only one item in `term_split`, which means there is no hyphen in",
"TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH",
"queries. MOST_RELEVANT = 15 # At most top `MOST_RELEVANT` results are returned for",
"Build vectors of each term and calculate lengths of each documents. Also a",
"else: term_vectors[stemmed_word].update({document_ID : 1}) stemming = {} term_vectors = {} # `term_vectors` structure:",
"is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x : x[1]) return relevance_scores def",
"if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word",
"def process_documents(): \"\"\" Build vectors of each term and calculate lengths of each",
"0 current_map = 0.0 for pair in query_results[query_ID]: if pair[0] in relevance_set: appearance_times",
"LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL",
"standard modules used in the script. import argparse # Used to parse program",
"Constants used in BM25 model. K = 1.0 B = 0.75 # A",
"17287), \"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\" (line 516) will not be handled",
"program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter out ones with",
"query = query.strip() query = query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \") for term",
"by just removing dots. # And similarly, phrases like \"m. i. t.\" (line",
"0.5) / (n_i + 0.5), 2) similarity += frequency * (1.0 + K)",
"are applied. # Tested under Python 3.5 on Ubuntu 16.04. # Author: '(<NAME>.)",
"is_number(word): \"\"\" A helper function to check if a string can be converted",
"def make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set = set() for pair in query_results[query_ID]:",
"term.split(\"-\") if len(term_split) > 1: # If only one item in `term_split`, which",
"= [] for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) >",
"# We only include queries whose relevance scores are less than or equal",
"= (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf = math.log((nums_of_documents - n_i + 0.5) /",
"is a number used as minuend to convert original relevance scores to #",
"be counted. term_split = term.split(\"-\") if len(term_split) > 1: # If only one",
"= 0 for document_ID in relevance_set: if document_ID in retrieval_set: appearance_times += 1",
"(query ID, document ID, and its rank [1 - 15]) form; if `FILE",
"\"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\"",
"is used in `bm25_similarities()` function. if args.m == \"manual\": manual_mode() elif args.m ==",
"`cran.all.1400` and `cranqrel` text files. ID = \".I\" TITLE = \".T\" AUTHORS =",
"model. K = 1.0 B = 0.75 # A constant used in Precision",
"not have ending symbols. document_lengths[document_ID] = math.sqrt(length) # Skip the document with index",
"query based on BM25 model. \"\"\" query_list = process_queries() query_results = {} #",
"just type `python3 bm25.py` to generate one in the working directory and extra",
"query.strip() query = query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \") for term in query.split():",
"BM25 model. \"\"\" query_list = process_queries() query_results = {} # `query_results` structure: {[KEY]",
"current_section in LABELS: if current_section == ID: query_list[query_ID] = query query = []",
"= [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80 # It decides",
"Calculate the previous document length and start a new one. # The empty",
"will be ignored in this case\") parser.add_argument(\"-m\", required = False, choices = [\"manual\",",
"returns a descending list with at most top `MOST_RELEVANT` pairs (Document ID, Similarity)",
"original query IDs, which is the numbers followed # by \".I\", since they",
"ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()), end",
"query_terms = process_single_query(user_query) print(\"Results for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1",
"in fp: current_section = line[0 : 2] if current_section in LABELS: if current_section",
"it is not specified\") parser.add_argument(\"-o\", required = False, nargs = \"?\", const =",
"num_of_documents += 1 length = 0.0 section = current_section continue # Update and",
"between two `manual` queries. MOST_RELEVANT = 15 # At most top `MOST_RELEVANT` results",
"\"r\") as fp: for line in fp: stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\"",
"DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80 # It decides the length of the",
"# Skip the first one. return query_list def bm25_similarities(query): \"\"\" It returns a",
"document_lengths[document_ID] = math.sqrt(length) # Calculate the previous document length and start a new",
"the index JSON file is not available, just type `python3 bm25.py` to generate",
"and extra arguments will be ignored in this case\") parser.add_argument(\"-m\", required = False,",
"script. import argparse # Used to parse program arguments. # More details are",
"else: return False def get_arguments(): parser = argparse.ArgumentParser(description = \"A script used to",
"a space. for term in line.split(): # Split according to whitespace characters and",
"And similarly, phrases like \"m. i. t.\" (line 36527) and # \"i. e.\"",
"index 0 from document length vector. del document_lengths[0] average_length = (document_lengths[document_ID] + average_length)",
"function will not end until user types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL *",
"similarity score 0. pair = (document_ID, similarity) similarities.append(pair) # Sort results in desceding",
"key = lambda x : x[1], reverse = True) if len(similarities) > MOST_RELEVANT:",
"document_lengths stores a normalised length for each document. return stemming, term_vectors, document_lengths def",
"# Step four: NDCG (Normalised Discounted Cumulated Gain) at N. ndcg_at_n = []",
"= \".T\" AUTHORS = \".A\" BIBLIOGRAPHY = \".B\" WORDS = \".W\" LABELS =",
"with open(RELEVANCE_PATH, \"r\") as fp: for line in fp: fields = line.split() query_ID",
"1 return query_results def make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set = set() for",
"{} query = [] query_ID = 0 for line in fp: current_section =",
"== WORDS: length += 1.0 # Treat a compound word as one word;",
"query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no repetition of document IDs for",
"program arguments. # More details are here: https://docs.python.org/3/library/argparse.html import readline # Used to",
"the same as P at N for Cranfield collection. # N.B.: `N` cannot",
"A helper function to check if a string is valid. Used to process",
"score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i]",
"12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \") for character in punctuation) #",
"term.split(\"-\") if len(term_split) > 1: for element in term_split: if is_valid(element): add_new_word(element) return",
"words are returned. \"\"\" def add_new_word(word): # A helper function to add a",
"parser = argparse.ArgumentParser(description = \"A script used to build BM25 model and relative",
"words and stemmed words are returned. \"\"\" def add_new_word(word): # A helper function",
"/ (frequency + K * ((1.0 - B) + B * document_lengths[document_ID])) *",
"`manual` mode, the function will not end until user types \"QUIT\". \"\"\" while",
"is_valid(compound): add_new_word(compound) if section == WORDS: length += 1.0 # Treat a compound",
"with open(QUERY_PATH, \"r\") as fp: query_list = {} query = [] query_ID =",
"N for all queries. \"\"\" p_at_n = 0.0 for query_ID in relevance_scores: relevance_set",
"# It is used in `bm25_similarities()` function. if args.m == \"manual\": manual_mode() elif",
"ndcg), end = \"\\n\") if __name__ == \"__main__\": stemmer = porter.PorterStemmer() stop_words =",
"def is_number(word): \"\"\" A helper function to check if a string can be",
"text. Used by `process_queries` function and `manual` mode. \"\"\" def add_new_word(word): # A",
": 1}) stemming = {} term_vectors = {} # `term_vectors` structure: {[Key] Term",
"= dict((ord(character), \" \") for character in punctuation) # Remove all punctuations except",
"current_map = 0.0 for pair in query_results[query_ID]: if pair[0] in relevance_set: appearance_times +=",
"a list of pairs to dictionary for convienence. # Step one: gain vector.",
"In the meantime, something like \"..e.g.at\" (line 17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\"",
"mean of precisions for all queries. \"\"\" precision = 0.0 for query_ID in",
"as one word; words in `AUTHORS` # and `BIBLIOGRAPHY` section will not be",
"# Sort pairs in ascending order for each query; the less the relevance",
"documents (Rel). relevance_set = set() for pair in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD:",
"may not be consecutive. num_of_documents += 1 length = 0.0 section = current_section",
"is not available, just type `python3 bm25.py` to generate one in the working",
"= set() for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\" It",
"def get_arguments(): parser = argparse.ArgumentParser(description = \"A script used to build BM25 model",
"used in the script. import argparse # Used to parse program arguments. #",
"- score_list_dict[pair[0]]) # Convert original ranking scores to NDCG-usable scores. else: gain_vector.append(0) #",
"term in query: if term in term_vectors and document_ID in term_vectors[term]: frequency =",
"used in `bm25_similarities()` function. if args.m == \"manual\": manual_mode() elif args.m == \"evaluation\":",
"a compound word as one word; words in `AUTHORS` # and `BIBLIOGRAPHY` section",
"ndcg_at_n = [] for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n)",
"pair[0], pair[1])) else: # For first-time running, it creates an index JSON file",
"= 0 length = 0.0 for line in fp: current_section = line[0 :",
"(line 14632), which causes an extra empty string is created # and makes",
"make_retrieval_set(query_ID) appearance_times = 0 for document_ID in retrieval_set: if document_ID in relevance_set: appearance_times",
"line.split(): # Split according to whitespace characters and deal with two special cases:",
"\"\\n\") for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg), end",
"= term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if section == WORDS: length += 1.0",
"N) are applied. # Tested under Python 3.5 on Ubuntu 16.04. # Author:",
"/ len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\" It yields a list of NDCGs",
"\"\"\" p_at_n = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times =",
"True except ValueError: return False def is_valid(word): \"\"\" A helper function to check",
"001. average_length += document_lengths[document_ID] document_ID += 1 # Ignore original document IDs, which",
"term.replace(\".\", \"\") # Remove full stops in one term, used to convert abbreviations",
"# also 5 evaluation methods (precision, recall, MAP, P at N and #",
"queries. \"\"\" recall = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set",
"\\\"QUIT\\\" to terminate): \") if user_query == USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results",
"/ len(retrieval_set) precision = precision / len(query_results) return precision def recall(): \"\"\" It",
"average_length += document_lengths[document_ID] document_ID += 1 # Ignore original document IDs, which is",
"= make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in retrieval_set: if",
"for all queries. \"\"\" recall = 0.0 for query_ID in relevance_scores: relevance_set =",
"p_at_n = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0",
"# (line 20026) will be converted into integers by just removing dots. #",
"means all # documents in it will be reserved. RELEVANCE_SCORE_FIX = 5 #",
"which causes an extra empty string is created # and makes term_split look",
"31509), # or \"studies.dash\" (line 516) will not be handled as expected. #",
"file is not available, just type `python3 bm25.py` to generate one in the",
"LABELS: if current_section == ID: document_lengths[document_ID] = math.sqrt(length) # Calculate the previous document",
"\"A script used to build BM25 model and relative evaluation methods. If the",
"len(relevance_set) recall = recall / len(query_results) return recall def p_at_n(n): \"\"\" It calculates",
"used to build BM25 model and relative evaluation methods. If the index JSON",
"continue elif section in CONTENTS: if query == []: query = process_single_query(line) else:",
"four: NDCG (Normalised Discounted Cumulated Gain) at N. ndcg_at_n = [] for pair",
"in term_split: if is_valid(element): add_new_word(element) return query_terms def process_queries(): with open(QUERY_PATH, \"r\") as",
"{int(ID) : length for ID, length in document_lengths.items()} for term, vector in term_vectors.items():",
"Cranfield collection. # N.B.: `N` cannot be larger than `MOST_RELEVANT`. N = 10",
"P at N and # NDCG at N) are applied. # Tested under",
"15]) form; if `FILE NAME` is not given, the default output file name",
"with index 0 from document length vector. del document_lengths[0] average_length = (document_lengths[document_ID] +",
"term with an ending hyphens like # \"sub- and\" (line 14632), which causes",
"mean_average_precision / len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\" It yields a list of",
"return stop_words def process_documents(): \"\"\" Build vectors of each term and calculate lengths",
"= 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 for",
"current_section == ID: document_lengths[document_ID] = math.sqrt(length) # Calculate the previous document length and",
"it creates an index JSON file and exit. print(\"[Generating the index file.]\") with",
"= term.split(\"-\") if len(term_split) > 1: # If only one item in `term_split`,",
"as fp: for line in fp: fields = line.split() query_ID = int(fields[0]) pair",
"It yields a list of NDCGs at up to N of each query",
"if a string can be converted to an integer. Used to process documents",
"documents in it will be reserved. RELEVANCE_SCORE_FIX = 5 # It is a",
"if len(ndcg_at_n) > n: # And finally, yield at most `n` results for",
"stemmed_word not in term_vectors: term_vectors[stemmed_word] = {} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] +=",
"to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set",
"\"\\n\") if __name__ == \"__main__\": stemmer = porter.PorterStemmer() stop_words = load_stop_words() punctuation =",
"= \"*\" BOUNDARY_LENGTH = 80 # It decides the length of the boundary",
"ones. # Constants used in BM25 model. K = 1.0 B = 0.75",
"[1 - 15]) form; if `FILE NAME` is not given, the default output",
"typing history buffer for `manual` mode. # More details are here: https://docs.python.org/3/library/readline.html import",
"bm25_similarities(query): \"\"\" It returns a descending list with at most top `MOST_RELEVANT` pairs",
"ideal_gain_vector = [] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]]",
"and `manual` mode. \"\"\" def add_new_word(word): # A helper function to add a",
"for document_ID in range(1, nums_of_documents + 1): # Document ID begins from 1.",
"MOST_RELEVANT] else: return similarities def manual_mode(): \"\"\" When in `manual` mode, the function",
"the relevance # score is, the more relevant the document is. relevance_scores[query_ID] =",
"consecutive. if current_section == WORDS: section = current_section continue elif section in CONTENTS:",
"(frequency + K * ((1.0 - B) + B * document_lengths[document_ID])) * idf",
"Cumulated Gain) at N. ndcg_at_n = [] for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0]",
"user_query == USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results for query \" + str(query_terms))",
"in term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times in vector.items()} nums_of_documents",
"of each documents. Also a dictionary containing pairs of original words and stemmed",
"structural data from orginial Cranfield collection and # implement the BM25 alogrithm information",
"[] query_ID = 0 for line in fp: current_section = line[0 : 2]",
"end = \"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end =",
"is 4 (-1, 1, 2, 3, 4), which means all # documents in",
"rank [1 - 15]) form; if `FILE NAME` is not given, the default",
"# It assumes no repetition of document IDs for each query. else: relevance_scores[query_ID]",
"mean average precision for all queries. \"\"\" mean_average_precision = 0.0 for query_ID in",
"# Update and go to next line immediately. elif section in CONTENTS: line",
"special cases: # abbreviations with \".\" and hyphenated compounds. term = term.replace(\".\", \"\")",
"(int(fields[1]), int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no repetition of",
"yield query_ID, ndcg_at_n[0 : n] else: yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\")",
"term_vectors and document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf =",
"from 1. similarity = 0.0 for term in query: if term in term_vectors",
"== USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\")",
"#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Description: Build a structural data",
"queries. \"\"\" precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set",
"NDCG-friendly ones. # Constants used in BM25 model. K = 1.0 B =",
"end = \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for query_ID, ndcg",
"mean of precisions at N for all queries. \"\"\" p_at_n = 0.0 for",
"ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default value",
"for term, vector in term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times",
"is the numbers followed by \".I\", # since they may not be consecutive.",
"ignored in this case\") parser.add_argument(\"-m\", required = False, choices = [\"manual\", \"evaluation\"], default",
"Skip the document with index 0 from document length vector. del document_lengths[0] average_length",
"[Value] {[Key] Document ID : [Value] Appearance Times}}. document_lengths = {} average_length =",
"(Ideal Discounted Cumulated Gain). ideal_gain_vector = [] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX -",
"Cranfield collection and # implement the BM25 alogrithm information retrieval; # also 5",
"for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for result in bm25_similarities(query_terms):",
"with open(DOCUMENT_PATH, \"r\") as fp: document_ID = 0 length = 0.0 for line",
"is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter out ones with relevance score larger",
"`N` cannot be larger than `MOST_RELEVANT`. N = 10 def is_number(word): \"\"\" A",
"like # \"sub- and\" (line 14632), which causes an extra empty string is",
"at N. # If `MOST_RELEVANT` is equal to `N`, precision will be the",
"in it will be reserved. RELEVANCE_SCORE_FIX = 5 # It is a number",
"4 (-1, 1, 2, 3, 4), which means all # documents in it",
"= stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in term_vectors: term_vectors[stemmed_word] = {}",
"queries whose relevance scores are less than or equal # to `RELEVANCE_SCORE_THRESHOLD` here.",
"\"m. i. t.\" (line 36527) and # \"i. e.\" (line 11820) will be",
"term_split = term.split(\"-\") if len(term_split) > 1: # If only one item in",
"# Document ID begins from 1. similarity = 0.0 for term in query:",
"less than or equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID):",
"precision def recall(): \"\"\" It calculates arithmetic mean of recalls for all queries.",
"relevance_scores[query_ID] = [pair] for query_ID in relevance_scores: # Sort pairs in ascending order",
"0 for document_ID in retrieval_set: if document_ID in relevance_set: appearance_times += 1 precision",
"and document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf = math.log((nums_of_documents",
"the same structure and length as `relevance_scores`. for query_ID in query_list: rank =",
"boundary between two `manual` queries. MOST_RELEVANT = 15 # At most top `MOST_RELEVANT`",
"file for index information and the like. import string # Used to do",
"generate one in the working directory and extra arguments will be ignored in",
"NAME` is not given, the default output file name is `evaluation_output.txt`\") return parser.parse_args()",
"appearance_times = 0 current_map = 0.0 for pair in query_results[query_ID]: if pair[0] in",
"{0}\".format(mean_average_precision()), end = \"\\n\") for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N,",
"# Treat a compound word as one word; words in `AUTHORS` # and",
"document_lengths[document] = document_lengths[document] / average_length # Now document_lengths stores a normalised length for",
"mean of recalls for all queries. \"\"\" recall = 0.0 for query_ID in",
"can be converted to an integer. Used to process documents and queries. \"\"\"",
"is_valid(word): \"\"\" A helper function to check if a string is valid. Used",
"> 1: # If only one item in `term_split`, which means there is",
"return query_terms def process_queries(): with open(QUERY_PATH, \"r\") as fp: query_list = {} query",
"math.log((nums_of_documents - n_i + 0.5) / (n_i + 0.5), 2) similarity += frequency",
"\"3\". # Calculate the last length since Cranfield collection does not have ending",
"only include queries whose relevance scores are less than or equal # to",
"open(RELEVANCE_PATH, \"r\") as fp: for line in fp: fields = line.split() query_ID =",
"retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in retrieval_set: if document_ID in",
"Author: '(<NAME>.) # Date created: 2018-05-07 # Here are some Python standard modules",
"width of terminal window. user_query = input(\"Enter query (type \\\"QUIT\\\" to terminate): \")",
"= stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms =",
"query_results[query_ID].append((pair[0], rank)) rank += 1 return query_results def make_relevance_set(query_ID): # Relevant documents (Rel).",
"like. import string # Used to do some regex operations. import math import",
"pair[1]) if len(ndcg_at_n) > n: # And finally, yield at most `n` results",
"`MOST_RELEVANT` results are returned for each query. USER_STOP_WORD = \"QUIT\" # When user",
"default value is 4 (-1, 1, 2, 3, 4), which means all #",
"as fp: for line in fp: stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\" Build",
"# since they may not be consecutive. num_of_documents += 1 length = 0.0",
"+= 1 current_map += appearance_times / pair[1] mean_average_precision += current_map / len(relevance_set) mean_average_precision",
"= current_section continue elif section in CONTENTS: if query == []: query =",
"Split according to whitespace characters and deal with two special cases: # abbreviations",
"nums_of_documents = len(document_lengths) # It is used in `bm25_similarities()` function. if args.m ==",
"previous document length and start a new one. # The empty entry for",
"in relevance_set and pair[1] <= n: appearance_times += 1 p_at_n += appearance_times /",
"in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1 return query_results def make_relevance_set(query_ID): # Relevant",
"punctuation) # Remove all punctuations except full stops and hyphens. args = get_arguments()",
"BM25 alogrithm information retrieval; # also 5 evaluation methods (precision, recall, MAP, P",
"= line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \") # Also, treat two consecutive hyphens",
"two special cases: # abbreviations with \".\" and hyphenated compounds. term = term.replace(\".\",",
"{2}\".format(N, query_ID, ndcg), end = \"\\n\") if __name__ == \"__main__\": stemmer = porter.PorterStemmer()",
"ending hyphens like # \"sub- and\" (line 14632), which causes an extra empty",
"'(<NAME>.) # Date created: 2018-05-07 # Here are some Python standard modules used",
"\") for character in punctuation) # Remove all punctuations except full stops and",
"part of compound words like \"two-step\" (line 38037) or # type names like",
"/ n p_at_n = p_at_n / len(query_results) return p_at_n def mean_average_precision(): \"\"\" It",
"More details are here: https://docs.python.org/3/library/readline.html import json # Used to create a human-readable",
"by default if it is not specified\") parser.add_argument(\"-o\", required = False, nargs =",
"vectors of each term and calculate lengths of each documents. Also a dictionary",
"2018-05-07 # Here are some Python standard modules used in the script. import",
"`AUTHORS` # and `BIBLIOGRAPHY` section will not be counted. term_split = term.split(\"-\") if",
"# NDCG at N) are applied. # Tested under Python 3.5 on Ubuntu",
"next line immediately. elif section in CONTENTS: line = line.translate(removing_punctuation_map) line = line.replace(\"--\",",
"NDCG (Normalised Discounted Cumulated Gain) at N. ndcg_at_n = [] for pair in",
"and the like. import string # Used to do some regex operations. import",
"stores a normalised length for each document. return stemming, term_vectors, document_lengths def process_single_query(query):",
"if len(term_split) > 1: # If only one item in `term_split`, which means",
"Step three: IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector = [] for pair in",
"in JSON cannot have `int` key, # therefore a conversion is of necessity.",
"MAP, P at N and # NDCG at N) are applied. # Tested",
"to fill the default # width of terminal window. user_query = input(\"Enter query",
"structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\")",
"do some regex operations. import math import os # Here are some Python",
"relevance_scores = {} # `relevance_scores` structure: {[KEY] query ID : [Value] [(Document ID,",
"= [] for pair in query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]])",
"output file name is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words = set() with",
"document 0 is also created although # in Cranfield collection, document ID begins",
"= query # Add the last entry. del query_list[0] # Skip the first",
"USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank",
"Filter out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The",
"similarity += frequency * (1.0 + K) / (frequency + K * ((1.0",
"WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80 # It decides the length of",
"# Ignore the one with similarity score 0. pair = (document_ID, similarity) similarities.append(pair)",
"\"\"\" if word != \"\" and word not in stop_words and not is_number(word):",
"does not have ending symbols. document_lengths[document_ID] = math.sqrt(length) # Skip the document with",
"necessity. document_lengths = {int(ID) : length for ID, length in document_lengths.items()} for term,",
"ID, Relevance Score)]}, which is exactly the same structure and length as `relevance_scores`.",
"document length and start a new one. # The empty entry for document",
"json # Used to create a human-readable JSON file for index information and",
"def mean_average_precision(): \"\"\" It calculates mean average precision for all queries. \"\"\" mean_average_precision",
"# More details are here: https://docs.python.org/3/library/readline.html import json # Used to create a",
"make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert a list of pairs to dictionary for",
"== []: query = process_single_query(line) else: query += process_single_query(line) query_list[query_ID] = query #",
"be reserved. RELEVANCE_SCORE_FIX = 5 # It is a number used as minuend",
"user types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s",
"script used to build BM25 model and relative evaluation methods. If the index",
"CONTENTS: if query == []: query = process_single_query(line) else: query += process_single_query(line) query_list[query_ID]",
"add_new_word(compound) term_split = term.split(\"-\") if len(term_split) > 1: for element in term_split: if",
"[] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i",
"no hyphen in this word. # There may exist a term with an",
"also 5 evaluation methods (precision, recall, MAP, P at N and # NDCG",
"evaluation result output in lines of 3-tuples (query ID, document ID, and its",
"return False def get_arguments(): parser = argparse.ArgumentParser(description = \"A script used to build",
"# Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default # width of terminal window.",
"of original words and stemmed words are returned. \"\"\" def add_new_word(word): # A",
"of terminal window. user_query = input(\"Enter query (type \\\"QUIT\\\" to terminate): \") if",
"help = \"BM25 evaluation result output in lines of 3-tuples (query ID, document",
"We only include queries whose relevance scores are less than or equal #",
"than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default value is 4 (-1, 1, 2,",
"= porter.PorterStemmer() stop_words = load_stop_words() punctuation = string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map",
"AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH =",
"x[1]) return relevance_scores def make_query_results(): \"\"\" It returns possible relevant documents for each",
"similarities.append(pair) # Sort results in desceding order. similarities = sorted(similarities, key = lambda",
"zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) > n: # And finally, yield",
"arguments will be ignored in this case\") parser.add_argument(\"-m\", required = False, choices =",
"an index JSON file and exit. print(\"[Generating the index file.]\") with open(INDEX_PATH, \"w\")",
"<= n: appearance_times += 1 p_at_n += appearance_times / n p_at_n = p_at_n",
"`evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH, \"r\") as fp:",
"+ 1): # Document ID begins from 1. similarity = 0.0 for term",
"as fp: stemming, term_vectors, document_lengths = json.load(fp) # Warning: unlike Python, `dict` type",
"[] query = query.strip() query = query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \") for",
"number used as minuend to convert original relevance scores to # NDCG-friendly ones.",
"Used to create a human-readable JSON file for index information and the like.",
"pair = (int(fields[1]), int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no",
"1: for element in term_split: if is_valid(element): add_new_word(element) return query_terms def process_queries(): with",
"original document IDs, which is the numbers followed by \".I\", # since they",
"document IDs, which is the numbers followed by \".I\", # since they may",
"term_vectors = {} # `term_vectors` structure: {[Key] Term : [Value] {[Key] Document ID",
"score 0. pair = (document_ID, similarity) similarities.append(pair) # Sort results in desceding order.",
"similarity) similarities.append(pair) # Sort results in desceding order. similarities = sorted(similarities, key =",
"IDs, which is the numbers followed by \".I\", # since they may not",
"# Author: '(<NAME>.) # Date created: 2018-05-07 # Here are some Python standard",
"Cranfield collection, document ID begins from 001. average_length += document_lengths[document_ID] document_ID += 1",
"make_relevance_set(query_ID) appearance_times = 0 for pair in query_results[query_ID]: if pair[0] in relevance_set and",
"return relevance_scores def make_query_results(): \"\"\" It returns possible relevant documents for each query",
"appearance_times += 1 p_at_n += appearance_times / n p_at_n = p_at_n / len(query_results)",
"= EVALUATION_PATH, metavar = \"FILE NAME\", help = \"BM25 evaluation result output in",
"ValueError: return False def is_valid(word): \"\"\" A helper function to check if a",
"cannot have `int` key, # therefore a conversion is of necessity. document_lengths =",
"numbers like \"3.2x10\" (line 18799), \"79.5degree\" # (line 20026) will be converted into",
"section will not be counted. term_split = term.split(\"-\") if len(term_split) > 1: #",
"will become \"rm\" stored in the dictionary after stemming. compound = term.replace(\"-\", \"\")",
"in `AUTHORS` # and `BIBLIOGRAPHY` section will not be counted. term_split = term.split(\"-\")",
"USER_STOP_WORD = \"QUIT\" # When user types `USER_STOP_WORD`, the program ends; it is",
"line in fp: fields = line.split() query_ID = int(fields[0]) pair = (int(fields[1]), int(fields[2]))",
"section == WORDS: length += 1.0 # Treat a compound word as one",
"in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg), end = \"\\n\") if __name__",
"(line 28459) or \"a52b06\" (line 25717). if is_valid(element): add_new_word(element) # Filter out all",
"# And similarly, phrases like \"m. i. t.\" (line 36527) and # \"i.",
"RELEVANCE_SCORE_THRESHOLD: # We only include queries whose relevance scores are less than or",
"document_ID in retrieval_set: appearance_times += 1 recall += appearance_times / len(relevance_set) recall =",
"recall = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID)",
"to calculate similarities. \"\"\" similarities = [] for document_ID in range(1, nums_of_documents +",
"= \"\\n\") if __name__ == \"__main__\": stemmer = porter.PorterStemmer() stop_words = load_stop_words() punctuation",
"each documents. Also a dictionary containing pairs of original words and stemmed words",
"help = \"mode selection; `manual` mode is chosen by default if it is",
"if current_section == ID: document_lengths[document_ID] = math.sqrt(length) # Calculate the previous document length",
"containing pairs of original words and stemmed words are returned. \"\"\" def add_new_word(word):",
"the script. import argparse # Used to parse program arguments. # More details",
"{} # `relevance_scores` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}",
"Relevant documents (Rel). relevance_set = set() for pair in relevance_scores[query_ID]: if pair[1] <=",
"= p_at_n / len(query_results) return p_at_n def mean_average_precision(): \"\"\" It calculates mean average",
"p_at_n(N)), end = \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for query_ID,",
"\"manual\": manual_mode() elif args.m == \"evaluation\": relevance_scores = load_relevance_scores() query_results = make_query_results() print_evaluation_results()",
"to NDCG-usable scores. else: gain_vector.append(0) # Step two: DCG (Discounted Cumulated Gain). dcg",
"appearance_times / len(relevance_set) recall = recall / len(query_results) return recall def p_at_n(n): \"\"\"",
"\" \") # Also, treat two consecutive hyphens as a space. for term",
"N.B.: `N` cannot be larger than `MOST_RELEVANT`. N = 10 def is_number(word): \"\"\"",
"string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \") for character in",
"= [] query_ID += 1 # Ignore original query IDs, which is the",
": [Value] [(Document ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as fp: for line",
"* BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default # width of",
"window. user_query = input(\"Enter query (type \\\"QUIT\\\" to terminate): \") if user_query ==",
"order for each query; the less the relevance # score is, the more",
"def p_at_n(n): \"\"\" It calculates arithmetic mean of precisions at N for all",
"the default output file name is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words =",
"not None: # If `-o` option is available. with open(args.o, \"w\") as fp:",
"# or \"studies.dash\" (line 516) will not be handled as expected. # All",
"required = False, choices = [\"manual\", \"evaluation\"], default = \"manual\", help = \"mode",
"\"r.m.s.\" (line 20241) will become \"rm\" stored in the dictionary after stemming. compound",
"for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\" It calculates arithmetic",
"recall = recall / len(query_results) return recall def p_at_n(n): \"\"\" It calculates arithmetic",
"str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end",
"pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1 return query_results def make_relevance_set(query_ID): #",
"for document_ID in relevance_set: if document_ID in retrieval_set: appearance_times += 1 recall +=",
"one. # The empty entry for document 0 is also created although #",
"query_list[0] # Skip the first one. return query_list def bm25_similarities(query): \"\"\" It returns",
"/ \"usa\". # In the meantime, something like \"..e.g.at\" (line 17393), # \"i.e.it\"",
"relevance # score is, the more relevant the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID],",
"`QUERY_PATH`. The default value is 4 (-1, 1, 2, 3, 4), which means",
"query_ID += 1 # Ignore original query IDs, which is the numbers followed",
"process documents and queries. \"\"\" if word != \"\" and word not in",
"convienence. # Step one: gain vector. gain_vector = [] for pair in query_results[query_ID]:",
"term_vectors, document_lengths def process_single_query(query): \"\"\" Process single line text. Used by `process_queries` function",
"= 0.0 num_of_documents = 0 with open(DOCUMENT_PATH, \"r\") as fp: document_ID = 0",
"in this case\") parser.add_argument(\"-m\", required = False, choices = [\"manual\", \"evaluation\"], default =",
"for query_ID, pair_list in query_results.items(): for pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0],",
"for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) > n: #",
"applied. # Tested under Python 3.5 on Ubuntu 16.04. # Author: '(<NAME>.) #",
"open(QUERY_PATH, \"r\") as fp: query_list = {} query = [] query_ID = 0",
"term, used to convert abbreviations # like \"m.i.t.\" (line 1222) / \"u.s.a.\" (line",
"+= document_lengths[document_ID] document_ID += 1 # Ignore original document IDs, which is the",
"query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 for pair in query_results[query_ID]:",
"Remove all punctuations except full stops and hyphens. args = get_arguments() if os.path.exists(INDEX_PATH):",
"EVALUATION_PATH, metavar = \"FILE NAME\", help = \"BM25 evaluation result output in lines",
"pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\" It calculates arithmetic mean",
"lengths of each documents. Also a dictionary containing pairs of original words and",
"math import os # Here are some Python libraries that places locally. import",
"in query_terms: query_terms.append(stemmed_word) query_terms = [] query = query.strip() query = query.translate(removing_punctuation_map) query",
"all queries. \"\"\" precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID)",
"Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>:",
"1.0 # Treat a compound word as one word; words in `AUTHORS` #",
"# Description: Build a structural data from orginial Cranfield collection and # implement",
"all queries. \"\"\" mean_average_precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID)",
"# and `BIBLIOGRAPHY` section will not be counted. term_split = term.split(\"-\") if len(term_split)",
"first item in `dcg`. for i in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i +",
"argparse # Used to parse program arguments. # More details are here: https://docs.python.org/3/library/argparse.html",
"K * ((1.0 - B) + B * document_lengths[document_ID])) * idf if similarity",
"similarity > 0.0: # Ignore the one with similarity score 0. pair =",
"= lambda x : x[1], reverse = True) if len(similarities) > MOST_RELEVANT: return",
"term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if",
"line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \") # Also, treat two consecutive hyphens as",
"Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N,",
"make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in relevance_set: if document_ID",
"vector. del document_lengths[0] average_length = (document_lengths[document_ID] + average_length) / num_of_documents for document in",
"meantime, something like \"..e.g.at\" (line 17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509),",
"normalised length for each document. return stemming, term_vectors, document_lengths def process_single_query(query): \"\"\" Process",
"open(STOP_WORDS_PATH, \"r\") as fp: for line in fp: stop_words.add(line.rstrip()) return stop_words def process_documents():",
"make_query_results() print_evaluation_results() if args.o is not None: # If `-o` option is available.",
"print(\"Results for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for result in",
"as fp: query_list = {} query = [] query_ID = 0 for line",
"regex operations. import math import os # Here are some Python libraries that",
"in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) > n: # And finally,",
"os # Here are some Python libraries that places locally. import porter STOP_WORDS_PATH",
"# abbreviations with \".\" and hyphenated compounds. term = term.replace(\".\", \"\") # Remove",
"retrieval; # also 5 evaluation methods (precision, recall, MAP, P at N and",
"def load_relevance_scores(): relevance_scores = {} # `relevance_scores` structure: {[KEY] query ID : [Value]",
"space. for term in line.split(): # Split according to whitespace characters and deal",
"mode. \"\"\" def add_new_word(word): # A helper function to add a new word",
"top `MOST_RELEVANT` results are returned for each query. USER_STOP_WORD = \"QUIT\" # When",
"`BIBLIOGRAPHY` section will not be counted. term_split = term.split(\"-\") if len(term_split) > 1:",
"gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original ranking scores to NDCG-usable scores. else: gain_vector.append(0)",
"[gain_vector[0]] # Put the first item in `dcg`. for i in range(1, len(gain_vector)):",
"= process_single_query(line) else: query += process_single_query(line) query_list[query_ID] = query # Add the last",
"`DELIMITER_SYMBOL`s to fill the default # width of terminal window. user_query = input(\"Enter",
"recall, MAP, P at N and # NDCG at N) are applied. #",
"query_ID, ndcg_at_n[0 : n] else: yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision:",
"term_split look like [\"sub\", \"\"]. for element in term_split: # Deal with each",
"query_list = {} query = [] query_ID = 0 for line in fp:",
"import os # Here are some Python libraries that places locally. import porter",
"# At most top `MOST_RELEVANT` results are returned for each query. USER_STOP_WORD =",
"{int(ID) : appearance_times for ID, appearance_times in vector.items()} nums_of_documents = len(document_lengths) # It",
"and exit. print(\"[Generating the index file.]\") with open(INDEX_PATH, \"w\") as fp: json.dump(process_documents(), fp)",
"(Normalised Discounted Cumulated Gain) at N. ndcg_at_n = [] for pair in zip(dcg,",
"for ID, appearance_times in vector.items()} nums_of_documents = len(document_lengths) # It is used in",
"dcg[-1]) # Step three: IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector = [] for",
"required = False, nargs = \"?\", const = EVALUATION_PATH, metavar = \"FILE NAME\",",
"= line[0 : 2] if current_section in LABELS: if current_section == ID: query_list[query_ID]",
"`BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default # width of terminal window. user_query =",
"documents. Also a dictionary containing pairs of original words and stemmed words are",
"\"\"\" query_list = process_queries() query_results = {} # `query_results` structure: {[KEY] query ID",
"set() for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\" It calculates",
"`MOST_RELEVANT` pairs (Document ID, Similarity) based on BM25 to calculate similarities. \"\"\" similarities",
"in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only include queries whose relevance",
"removing_punctuation_map = dict((ord(character), \" \") for character in punctuation) # Remove all punctuations",
"this case\") parser.add_argument(\"-m\", required = False, choices = [\"manual\", \"evaluation\"], default = \"manual\",",
"term_split = term.split(\"-\") if len(term_split) > 1: for element in term_split: if is_valid(element):",
"`term_vectors` structure: {[Key] Term : [Value] {[Key] Document ID : [Value] Appearance Times}}.",
"of compound words like \"two-step\" (line 38037) or # type names like \"75s-t6\"",
"abbreviations # like \"m.i.t.\" (line 1222) / \"u.s.a.\" (line 32542) into \"mit\" /",
"`relevance_scores` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]} with open(RELEVANCE_PATH,",
"When user types `USER_STOP_WORD`, the program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4",
"to convert abbreviations # like \"m.i.t.\" (line 1222) / \"u.s.a.\" (line 32542) into",
"= current_section continue # Update and go to next line immediately. elif section",
"include queries whose relevance scores are less than or equal # to `RELEVANCE_SCORE_THRESHOLD`",
"pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: # For first-time running,",
"= load_relevance_scores() query_results = make_query_results() print_evaluation_results() if args.o is not None: # If",
"than or equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): #",
"in retrieval_set: if document_ID in relevance_set: appearance_times += 1 precision += appearance_times /",
"5 evaluation methods (precision, recall, MAP, P at N and # NDCG at",
"mean_average_precision = mean_average_precision / len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\" It yields a",
"if current_section == ID: query_list[query_ID] = query query = [] query_ID += 1",
"(line 18799), \"79.5degree\" # (line 20026) will be converted into integers by just",
"# Used to do some regex operations. import math import os # Here",
"integers by just removing dots. # And similarly, phrases like \"m. i. t.\"",
"\"r\") as fp: document_ID = 0 length = 0.0 for line in fp:",
"Calculate the last length since Cranfield collection does not have ending symbols. document_lengths[document_ID]",
"deal with two special cases: # abbreviations with \".\" and hyphenated compounds. term",
"pairs in ascending order for each query; the less the relevance # score",
"length vector. del document_lengths[0] average_length = (document_lengths[document_ID] + average_length) / num_of_documents for document",
"document_lengths[document_ID] = math.sqrt(length) # Skip the document with index 0 from document length",
"is available. with open(args.o, \"w\") as fp: for query_ID, pair_list in query_results.items(): for",
"in Precision at N and NDCG at N. # If `MOST_RELEVANT` is equal",
"= 0.75 # A constant used in Precision at N and NDCG at",
"relevance_set = make_relevance_set(query_ID) appearance_times = 0 for pair in query_results[query_ID]: if pair[0] in",
"in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming = {} term_vectors",
"0 is also created although # in Cranfield collection, document ID begins from",
"load_relevance_scores() query_results = make_query_results() print_evaluation_results() if args.o is not None: # If `-o`",
"evaluation methods. If the index JSON file is not available, just type `python3",
"score larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default value is 4 (-1,",
"in query_results.items(): for pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: #",
"treat two consecutive hyphens as a space. for term in line.split(): # Split",
"for each query based on BM25 model. \"\"\" query_list = process_queries() query_results =",
"pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only include queries whose relevance scores are less",
"create a human-readable JSON file for index information and the like. import string",
"def add_new_word(word): # A helper function to add a new word in `term_vectors`.",
"the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x : x[1]) return",
"term = term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split =",
"# Here are some Python standard modules used in the script. import argparse",
"document IDs for each query. else: relevance_scores[query_ID] = [pair] for query_ID in relevance_scores:",
"range(1, nums_of_documents + 1): # Document ID begins from 1. similarity = 0.0",
"not be handled as expected. # All float-point numbers like \"3.2x10\" (line 18799),",
"Put the first item in `dcg`. for i in range(1, len(gain_vector)): dcg.append(gain_vector[i] /",
"= make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in relevance_set: if",
"# Remove full stops in one term, used to convert abbreviations # like",
"from `QUERY_PATH`. The default value is 4 (-1, 1, 2, 3, 4), which",
"precision for all queries. \"\"\" mean_average_precision = 0.0 for query_ID in relevance_scores: relevance_set",
"RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels in `cran.all.1400`",
"to generate one in the working directory and extra arguments will be ignored",
"= [\"manual\", \"evaluation\"], default = \"manual\", help = \"mode selection; `manual` mode is",
"= query.strip() query = query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \") for term in",
"`relevance_scores`. for query_ID in query_list: rank = 1 query_results[query_ID] = [] for pair",
"at most `n` results for each query. yield query_ID, ndcg_at_n[0 : n] else:",
"\"\"\" It calculates mean average precision for all queries. \"\"\" mean_average_precision = 0.0",
"word as one word; words in `AUTHORS` # and `BIBLIOGRAPHY` section will not",
"readline # Used to create a typing history buffer for `manual` mode. #",
"word in `term_vectors`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word =",
"for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1])",
"query_results = make_query_results() print_evaluation_results() if args.o is not None: # If `-o` option",
"element in term_split: if is_valid(element): add_new_word(element) return query_terms def process_queries(): with open(QUERY_PATH, \"r\")",
"arithmetic mean of recalls for all queries. \"\"\" recall = 0.0 for query_ID",
"Used to process documents and queries. \"\"\" if word != \"\" and word",
"NAME\", help = \"BM25 evaluation result output in lines of 3-tuples (query ID,",
"print(\"[Loading BM25 index from file.]\") with open(INDEX_PATH, \"r\") as fp: stemming, term_vectors, document_lengths",
"document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length # Now document_lengths stores a normalised length",
"retrieval_set def precision(): \"\"\" It calculates arithmetic mean of precisions for all queries.",
"results in desceding order. similarities = sorted(similarities, key = lambda x : x[1],",
"type names like \"75s-t6\" (line 28459) or \"a52b06\" (line 25717). if is_valid(element): add_new_word(element)",
"print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\")",
"= {} query = [] query_ID = 0 for line in fp: current_section",
"and word not in stop_words and not is_number(word): return True else: return False",
"= False, choices = [\"manual\", \"evaluation\"], default = \"manual\", help = \"mode selection;",
"hyphens like # \"sub- and\" (line 14632), which causes an extra empty string",
"+= 1 length = 0.0 section = current_section continue # Update and go",
"scores. else: gain_vector.append(0) # Step two: DCG (Discounted Cumulated Gain). dcg = [gain_vector[0]]",
"term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf = math.log((nums_of_documents - n_i +",
"become \"rm\" stored in the dictionary after stemming. compound = term.replace(\"-\", \"\") if",
"`query_terms`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if",
"20241) will become \"rm\" stored in the dictionary after stemming. compound = term.replace(\"-\",",
"return retrieval_set def precision(): \"\"\" It calculates arithmetic mean of precisions for all",
"query ID : [Value] [(Document ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as fp:",
"0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times =",
"they are not consecutive. if current_section == WORDS: section = current_section continue elif",
"args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\") with open(INDEX_PATH, \"r\")",
"Warning: unlike Python, `dict` type in JSON cannot have `int` key, # therefore",
"arithmetic mean of precisions at N for all queries. \"\"\" p_at_n = 0.0",
"idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1]) # Step four: NDCG (Normalised",
"the first one. return query_list def bm25_similarities(query): \"\"\" It returns a descending list",
"+= appearance_times / len(retrieval_set) precision = precision / len(query_results) return precision def recall():",
": MOST_RELEVANT] else: return similarities def manual_mode(): \"\"\" When in `manual` mode, the",
"result output in lines of 3-tuples (query ID, document ID, and its rank",
"+= 1 p_at_n += appearance_times / n p_at_n = p_at_n / len(query_results) return",
"document_lengths = json.load(fp) # Warning: unlike Python, `dict` type in JSON cannot have",
"documents for each query based on BM25 model. \"\"\" query_list = process_queries() query_results",
"Treat a compound word as one word; words in `AUTHORS` # and `BIBLIOGRAPHY`",
"{2}\\n\".format(query_ID, pair[0], pair[1])) else: # For first-time running, it creates an index JSON",
"its rank [1 - 15]) form; if `FILE NAME` is not given, the",
"entry. del query_list[0] # Skip the first one. return query_list def bm25_similarities(query): \"\"\"",
"AUTHORS = \".A\" BIBLIOGRAPHY = \".B\" WORDS = \".W\" LABELS = [ID, TITLE,",
"(line 38037) or # type names like \"75s-t6\" (line 28459) or \"a52b06\" (line",
"`term_vectors`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if",
"the one with similarity score 0. pair = (document_ID, similarity) similarities.append(pair) # Sort",
"in Cranfield collection, document ID begins from 001. average_length += document_lengths[document_ID] document_ID +=",
"pair in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only include queries whose",
"query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 current_map = 0.0 for",
"== \"__main__\": stemmer = porter.PorterStemmer() stop_words = load_stop_words() punctuation = string.punctuation[0 : 12]",
"args.o is not None: # If `-o` option is available. with open(args.o, \"w\")",
"print_evaluation_results() if args.o is not None: # If `-o` option is available. with",
"be ignored. # \"r.m.s.\" (line 20241) will become \"rm\" stored in the dictionary",
"= \"FILE NAME\", help = \"BM25 evaluation result output in lines of 3-tuples",
"# If `-o` option is available. with open(args.o, \"w\") as fp: for query_ID,",
"not in stop_words and not is_number(word): return True else: return False def get_arguments():",
"end = \"\\n\") for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID,",
"\"f8u3\" will be saved, but not \"3\". # Calculate the last length since",
"= [gain_vector[0]] # Put the first item in `dcg`. for i in range(1,",
"string is created # and makes term_split look like [\"sub\", \"\"]. for element",
"parser.add_argument(\"-m\", required = False, choices = [\"manual\", \"evaluation\"], default = \"manual\", help =",
"/ math.log(i + 1, 2) + dcg[-1]) # Step three: IDCG (Ideal Discounted",
"specified\") parser.add_argument(\"-o\", required = False, nargs = \"?\", const = EVALUATION_PATH, metavar =",
"operations. import math import os # Here are some Python libraries that places",
"rank = 1 query_results[query_ID] = [] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank",
"def load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH, \"r\") as fp: for line in",
"check if a string can be converted to an integer. Used to process",
"a number used as minuend to convert original relevance scores to # NDCG-friendly",
"\"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the",
"fp: stemming, term_vectors, document_lengths = json.load(fp) # Warning: unlike Python, `dict` type in",
"1 def load_relevance_scores(): relevance_scores = {} # `relevance_scores` structure: {[KEY] query ID :",
"appearance_times += 1 precision += appearance_times / len(retrieval_set) precision = precision / len(query_results)",
"query # Add the last entry. del query_list[0] # Skip the first one.",
"process_queries() query_results = {} # `query_results` structure: {[KEY] query ID : [Value] [(Document",
"compounds. term = term.replace(\".\", \"\") # Remove full stops in one term, used",
"calculates arithmetic mean of precisions for all queries. \"\"\" precision = 0.0 for",
"MOST_RELEVANT = 15 # At most top `MOST_RELEVANT` results are returned for each",
"Here are some Python libraries that places locally. import porter STOP_WORDS_PATH = \"stopwords.txt\"",
"query_ID in query_list: rank = 1 query_results[query_ID] = [] for pair in bm25_similarities(query_list[query_ID]):",
"for query_ID, score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert",
"0.0: # Ignore the one with similarity score 0. pair = (document_ID, similarity)",
"Sort results in desceding order. similarities = sorted(similarities, key = lambda x :",
"relevance_set = make_relevance_set(query_ID) appearance_times = 0 current_map = 0.0 for pair in query_results[query_ID]:",
"not be consecutive. num_of_documents += 1 length = 0.0 section = current_section continue",
"= get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\") with open(INDEX_PATH, \"r\") as",
"begins from 1. similarity = 0.0 for term in query: if term in",
"\"mit\" / \"usa\". # In the meantime, something like \"..e.g.at\" (line 17393), #",
"0.0 for line in fp: current_section = line[0 : 2] if current_section in",
"which is the numbers followed # by \".I\", since they are not consecutive.",
"type in JSON cannot have `int` key, # therefore a conversion is of",
"Labels in `cran.all.1400` and `cranqrel` text files. ID = \".I\" TITLE = \".T\"",
"top `MOST_RELEVANT` pairs (Document ID, Similarity) based on BM25 to calculate similarities. \"\"\"",
"JSON cannot have `int` key, # therefore a conversion is of necessity. document_lengths",
"# Now document_lengths stores a normalised length for each document. return stemming, term_vectors,",
"[(Document ID, Relevance Score)]}, which is exactly the same structure and length as",
"stemmed_word = stemming[word] if stemmed_word not in term_vectors: term_vectors[stemmed_word] = {} if document_ID",
"in relevance_set: if document_ID in retrieval_set: appearance_times += 1 recall += appearance_times /",
"calculate lengths of each documents. Also a dictionary containing pairs of original words",
"query_results = {} # `query_results` structure: {[KEY] query ID : [Value] [(Document ID,",
"used in BM25 model. K = 1.0 B = 0.75 # A constant",
"rank += 1 def load_relevance_scores(): relevance_scores = {} # `relevance_scores` structure: {[KEY] query",
"each query. else: relevance_scores[query_ID] = [pair] for query_ID in relevance_scores: # Sort pairs",
"appearance_times = 0 for document_ID in relevance_set: if document_ID in retrieval_set: appearance_times +=",
"= line[0 : 2] if current_section in LABELS: if current_section == ID: document_lengths[document_ID]",
"\".I\" TITLE = \".T\" AUTHORS = \".A\" BIBLIOGRAPHY = \".B\" WORDS = \".W\"",
"and pair[1] <= n: appearance_times += 1 p_at_n += appearance_times / n p_at_n",
"same as P at N for Cranfield collection. # N.B.: `N` cannot be",
"file name is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH,",
"equal to `N`, precision will be the same as P at N for",
"for example, for \"f8u-3\" (line 35373), # both \"f8u\" and \"f8u3\" will be",
"\"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for query_ID, ndcg in ndcg_at_n(N):",
"\"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels in `cran.all.1400` and `cranqrel`",
"with open(INDEX_PATH, \"r\") as fp: stemming, term_vectors, document_lengths = json.load(fp) # Warning: unlike",
"p_at_n(n): \"\"\" It calculates arithmetic mean of precisions at N for all queries.",
"= [] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for",
"[ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\"",
"# Used to parse program arguments. # More details are here: https://docs.python.org/3/library/argparse.html import",
"Used by `process_queries` function and `manual` mode. \"\"\" def add_new_word(word): # A helper",
"created: 2018-05-07 # Here are some Python standard modules used in the script.",
"have ending symbols. document_lengths[document_ID] = math.sqrt(length) # Skip the document with index 0",
"return True else: return False def get_arguments(): parser = argparse.ArgumentParser(description = \"A script",
"scores are less than or equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set",
"list of pairs to dictionary for convienence. # Step one: gain vector. gain_vector",
"\"\"\" It calculates arithmetic mean of precisions for all queries. \"\"\" precision =",
"\") for term in query.split(): term = term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\")",
"gain_vector = [] for pair in query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX -",
"set() for pair in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only include",
"def process_queries(): with open(QUERY_PATH, \"r\") as fp: query_list = {} query = []",
"= query query = [] query_ID += 1 # Ignore original query IDs,",
"0. pair = (document_ID, similarity) similarities.append(pair) # Sort results in desceding order. similarities",
"words in `AUTHORS` # and `BIBLIOGRAPHY` section will not be counted. term_split =",
"end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()),",
"the last entry. del query_list[0] # Skip the first one. return query_list def",
"like \"m.i.t.\" (line 1222) / \"u.s.a.\" (line 32542) into \"mit\" / \"usa\". #",
"document in document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length # Now document_lengths stores a",
"{} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming",
"https://docs.python.org/3/library/argparse.html import readline # Used to create a typing history buffer for `manual`",
"+= 1 return query_results def make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set = set()",
"equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): # Retrieval documents",
"# Put the first item in `dcg`. for i in range(1, len(gain_vector)): dcg.append(gain_vector[i]",
"one item in `term_split`, which means there is no hyphen in this word.",
"fp: stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\" Build vectors of each term and",
"* document_lengths[document_ID])) * idf if similarity > 0.0: # Ignore the one with",
"something like \"..e.g.at\" (line 17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509), #",
"\"\"\" try: int(word) return True except ValueError: return False def is_valid(word): \"\"\" A",
"float-point numbers like \"3.2x10\" (line 18799), \"79.5degree\" # (line 20026) will be converted",
"If `-o` option is available. with open(args.o, \"w\") as fp: for query_ID, pair_list",
"len(ndcg_at_n) > n: # And finally, yield at most `n` results for each",
"recall / len(query_results) return recall def p_at_n(n): \"\"\" It calculates arithmetic mean of",
"n] else: yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end =",
"is_valid(element): add_new_word(element) # Filter out all pure integers; for example, for \"f8u-3\" (line",
"\"\"\" Build vectors of each term and calculate lengths of each documents. Also",
"to check if a string can be converted to an integer. Used to",
"> 0.0: # Ignore the one with similarity score 0. pair = (document_ID,",
"and start a new one. # The empty entry for document 0 is",
"\" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0],",
"= \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels in `cran.all.1400` and",
"hyphen in this word. # There may exist a term with an ending",
"at N for all queries. \"\"\" p_at_n = 0.0 for query_ID in relevance_scores:",
"def manual_mode(): \"\"\" When in `manual` mode, the function will not end until",
"for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 current_map = 0.0",
"relevance_scores = load_relevance_scores() query_results = make_query_results() print_evaluation_results() if args.o is not None: #",
"if stemmed_word not in term_vectors: term_vectors[stemmed_word] = {} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID]",
"query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\" It calculates arithmetic mean of precisions",
"`dict` type in JSON cannot have `int` key, # therefore a conversion is",
"from file.]\") with open(INDEX_PATH, \"r\") as fp: stemming, term_vectors, document_lengths = json.load(fp) #",
"calculates mean average precision for all queries. \"\"\" mean_average_precision = 0.0 for query_ID",
"with each part of compound words like \"two-step\" (line 38037) or # type",
"like \"75s-t6\" (line 28459) or \"a52b06\" (line 25717). if is_valid(element): add_new_word(element) # Filter",
"punctuation = string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \") for",
"length for ID, length in document_lengths.items()} for term, vector in term_vectors.items(): term_vectors[term] =",
"for ID, length in document_lengths.items()} for term, vector in term_vectors.items(): term_vectors[term] = {int(ID)",
"methods (precision, recall, MAP, P at N and # NDCG at N) are",
"query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for",
"0.0 for term in query: if term in term_vectors and document_ID in term_vectors[term]:",
"handled as expected. # All float-point numbers like \"3.2x10\" (line 18799), \"79.5degree\" #",
"== \"evaluation\": relevance_scores = load_relevance_scores() query_results = make_query_results() print_evaluation_results() if args.o is not",
"methods. If the index JSON file is not available, just type `python3 bm25.py`",
"name is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH, \"r\")",
"= \"\\n\") rank += 1 def load_relevance_scores(): relevance_scores = {} # `relevance_scores` structure:",
"Step two: DCG (Discounted Cumulated Gain). dcg = [gain_vector[0]] # Put the first",
"ID begins from 001. average_length += document_lengths[document_ID] document_ID += 1 # Ignore original",
"in relevance_set: appearance_times += 1 precision += appearance_times / len(retrieval_set) precision = precision",
"len(term_split) > 1: for element in term_split: if is_valid(element): add_new_word(element) return query_terms def",
"reserved. RELEVANCE_SCORE_FIX = 5 # It is a number used as minuend to",
"consecutive. num_of_documents += 1 length = 0.0 section = current_section continue # Update",
"first-time running, it creates an index JSON file and exit. print(\"[Generating the index",
"{[KEY] query ID : [Value] [(Document ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as",
"It calculates mean average precision for all queries. \"\"\" mean_average_precision = 0.0 for",
"LABELS: if current_section == ID: query_list[query_ID] = query query = [] query_ID +=",
"# Also, treat two consecutive hyphens as a space. for term in line.split():",
"collection does not have ending symbols. document_lengths[document_ID] = math.sqrt(length) # Skip the document",
"{[Key] Term : [Value] {[Key] Document ID : [Value] Appearance Times}}. document_lengths =",
"All float-point numbers like \"3.2x10\" (line 18799), \"79.5degree\" # (line 20026) will be",
"in the working directory and extra arguments will be ignored in this case\")",
"# from `QUERY_PATH`. The default value is 4 (-1, 1, 2, 3, 4),",
"as fp: document_ID = 0 length = 0.0 for line in fp: current_section",
"each document. return stemming, term_vectors, document_lengths def process_single_query(query): \"\"\" Process single line text.",
"stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\" Build vectors of each term and calculate",
"5 # It is a number used as minuend to convert original relevance",
"for element in term_split: # Deal with each part of compound words like",
"0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 for pair",
"and not is_number(word): return True else: return False def get_arguments(): parser = argparse.ArgumentParser(description",
"relevance scores are less than or equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return",
"reverse = True) if len(similarities) > MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else: return",
"ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter out ones with relevance",
"Step one: gain vector. gain_vector = [] for pair in query_results[query_ID]: if pair[0]",
"1 # Ignore original document IDs, which is the numbers followed by \".I\",",
"True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default #",
"a normalised length for each document. return stemming, term_vectors, document_lengths def process_single_query(query): \"\"\"",
"original ranking scores to NDCG-usable scores. else: gain_vector.append(0) # Step two: DCG (Discounted",
"(line 36527) and # \"i. e.\" (line 11820) will be ignored. # \"r.m.s.\"",
"in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 current_map = 0.0 for pair",
"\".I\", # since they may not be consecutive. num_of_documents += 1 length =",
"1: # If only one item in `term_split`, which means there is no",
"query_list: rank = 1 query_results[query_ID] = [] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank))",
"is also created although # in Cranfield collection, document ID begins from 001.",
"results for each query. yield query_ID, ndcg_at_n[0 : n] else: yield query_ID, ndcg_at_n",
"frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf = math.log((nums_of_documents - n_i + 0.5)",
"of each query separately. \"\"\" for query_ID, score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID)",
"the numbers followed by \".I\", # since they may not be consecutive. num_of_documents",
"/ len(query_results) return p_at_n def mean_average_precision(): \"\"\" It calculates mean average precision for",
"= (document_lengths[document_ID] + average_length) / num_of_documents for document in document_lengths.keys(): document_lengths[document] = document_lengths[document]",
"each query; the less the relevance # score is, the more relevant the",
"x[1], reverse = True) if len(similarities) > MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else:",
"return similarities[0 : MOST_RELEVANT] else: return similarities def manual_mode(): \"\"\" When in `manual`",
"Ignore original document IDs, which is the numbers followed by \".I\", # since",
"available, just type `python3 bm25.py` to generate one in the working directory and",
"# It is a number used as minuend to convert original relevance scores",
"# If only one item in `term_split`, which means there is no hyphen",
"sorted(relevance_scores[query_ID], key = lambda x : x[1]) return relevance_scores def make_query_results(): \"\"\" It",
": 2] if current_section in LABELS: if current_section == ID: document_lengths[document_ID] = math.sqrt(length)",
"document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming = {}",
"# NDCG-friendly ones. # Constants used in BM25 model. K = 1.0 B",
"p_at_n def mean_average_precision(): \"\"\" It calculates mean average precision for all queries. \"\"\"",
"[]: query = process_single_query(line) else: query += process_single_query(line) query_list[query_ID] = query # Add",
"characters and deal with two special cases: # abbreviations with \".\" and hyphenated",
"in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\") rank += 1 def load_relevance_scores():",
"def is_valid(word): \"\"\" A helper function to check if a string is valid.",
"they may not be consecutive. num_of_documents += 1 length = 0.0 section =",
"for all queries. \"\"\" p_at_n = 0.0 for query_ID in relevance_scores: relevance_set =",
"at N. ndcg_at_n = [] for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1])",
"lambda x : x[1]) return relevance_scores def make_query_results(): \"\"\" It returns possible relevant",
"query_results.items(): for pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: # For",
"files. ID = \".I\" TITLE = \".T\" AUTHORS = \".A\" BIBLIOGRAPHY = \".B\"",
"add_new_word(compound) if section == WORDS: length += 1.0 # Treat a compound word",
"section in CONTENTS: if query == []: query = process_single_query(line) else: query +=",
"bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1 return query_results def make_relevance_set(query_ID): # Relevant documents",
"(line 20241) will become \"rm\" stored in the dictionary after stemming. compound =",
"`cranqrel` text files. ID = \".I\" TITLE = \".T\" AUTHORS = \".A\" BIBLIOGRAPHY",
"add a new word in `term_vectors`. if word not in stemming: stemming[word] =",
"> MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else: return similarities def manual_mode(): \"\"\" When",
"math.sqrt(length) # Skip the document with index 0 from document length vector. del",
"BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default # width of terminal",
"JSON file is not available, just type `python3 bm25.py` to generate one in",
"document_lengths = {} average_length = 0.0 num_of_documents = 0 with open(DOCUMENT_PATH, \"r\") as",
"and \"f8u3\" will be saved, but not \"3\". # Calculate the last length",
"selection; `manual` mode is chosen by default if it is not specified\") parser.add_argument(\"-o\",",
"e.\" (line 11820) will be ignored. # \"r.m.s.\" (line 20241) will become \"rm\"",
"def ndcg_at_n(n): \"\"\" It yields a list of NDCGs at up to N",
"for document_ID in retrieval_set: if document_ID in relevance_set: appearance_times += 1 precision +=",
"ID: document_lengths[document_ID] = math.sqrt(length) # Calculate the previous document length and start a",
"process_queries(): with open(QUERY_PATH, \"r\") as fp: query_list = {} query = [] query_ID",
"A helper function to add a new word in `query_terms`. if word not",
"term in term_vectors and document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term])",
"[Value] [(Document ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as fp: for line in",
"\"\"\" A helper function to check if a string is valid. Used to",
"in vector.items()} nums_of_documents = len(document_lengths) # It is used in `bm25_similarities()` function. if",
"# `relevance_scores` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]} with",
"+= 1 def load_relevance_scores(): relevance_scores = {} # `relevance_scores` structure: {[KEY] query ID",
"def recall(): \"\"\" It calculates arithmetic mean of recalls for all queries. \"\"\"",
"ID, Similarity) based on BM25 to calculate similarities. \"\"\" similarities = [] for",
"# Relevant documents (Rel). relevance_set = set() for pair in relevance_scores[query_ID]: if pair[1]",
"const = EVALUATION_PATH, metavar = \"FILE NAME\", help = \"BM25 evaluation result output",
"than `MOST_RELEVANT`. N = 10 def is_number(word): \"\"\" A helper function to check",
"and `BIBLIOGRAPHY` section will not be counted. term_split = term.split(\"-\") if len(term_split) >",
"= \"evaluation_output.txt\" # Labels in `cran.all.1400` and `cranqrel` text files. ID = \".I\"",
"+= current_map / len(relevance_set) mean_average_precision = mean_average_precision / len(query_results) return mean_average_precision def ndcg_at_n(n):",
"of precisions at N for all queries. \"\"\" p_at_n = 0.0 for query_ID",
"BM25 index from file.]\") with open(INDEX_PATH, \"r\") as fp: stemming, term_vectors, document_lengths =",
"len(document_lengths) # It is used in `bm25_similarities()` function. if args.m == \"manual\": manual_mode()",
"in query_results[query_ID]: if pair[0] in relevance_set: appearance_times += 1 current_map += appearance_times /",
"orginial Cranfield collection and # implement the BM25 alogrithm information retrieval; # also",
"4 # Filter out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD` # from",
"used to convert abbreviations # like \"m.i.t.\" (line 1222) / \"u.s.a.\" (line 32542)",
"str(result[1])), end = \"\\n\") rank += 1 def load_relevance_scores(): relevance_scores = {} #",
"like \"3.2x10\" (line 18799), \"79.5degree\" # (line 20026) will be converted into integers",
"each query separately. \"\"\" for query_ID, score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict",
"function. if args.m == \"manual\": manual_mode() elif args.m == \"evaluation\": relevance_scores = load_relevance_scores()",
"`manual` mode is chosen by default if it is not specified\") parser.add_argument(\"-o\", required",
"\"\"\" similarities = [] for document_ID in range(1, nums_of_documents + 1): # Document",
"def process_single_query(query): \"\"\" Process single line text. Used by `process_queries` function and `manual`",
"+ 0.5), 2) similarity += frequency * (1.0 + K) / (frequency +",
"query. else: relevance_scores[query_ID] = [pair] for query_ID in relevance_scores: # Sort pairs in",
"structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}, which is exactly",
"not in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in",
"stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms = []",
"(line 35373), # both \"f8u\" and \"f8u3\" will be saved, but not \"3\".",
"# to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): # Retrieval documents (Ret).",
"document_ID in relevance_set: appearance_times += 1 precision += appearance_times / len(retrieval_set) precision =",
"is valid. Used to process documents and queries. \"\"\" if word != \"\"",
"or \"a52b06\" (line 25717). if is_valid(element): add_new_word(element) # Filter out all pure integers;",
"def make_query_results(): \"\"\" It returns possible relevant documents for each query based on",
"line.split() query_ID = int(fields[0]) pair = (int(fields[1]), int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair)",
"appearance_times += 1 recall += appearance_times / len(relevance_set) recall = recall / len(query_results)",
"while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default",
"nums_of_documents + 1): # Document ID begins from 1. similarity = 0.0 for",
"pair[1] <= n: appearance_times += 1 p_at_n += appearance_times / n p_at_n =",
"= set() with open(STOP_WORDS_PATH, \"r\") as fp: for line in fp: stop_words.add(line.rstrip()) return",
"n_i = len(term_vectors[term]) idf = math.log((nums_of_documents - n_i + 0.5) / (n_i +",
"https://docs.python.org/3/library/readline.html import json # Used to create a human-readable JSON file for index",
"will be the same as P at N for Cranfield collection. # N.B.:",
"mode is chosen by default if it is not specified\") parser.add_argument(\"-o\", required =",
"in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in term_vectors:",
"1 recall += appearance_times / len(relevance_set) recall = recall / len(query_results) return recall",
"= set() for pair in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only",
"Ubuntu 16.04. # Author: '(<NAME>.) # Date created: 2018-05-07 # Here are some",
"`term_split`, which means there is no hyphen in this word. # There may",
"one in the working directory and extra arguments will be ignored in this",
"\"\"\" It calculates arithmetic mean of precisions at N for all queries. \"\"\"",
": [Value] {[Key] Document ID : [Value] Appearance Times}}. document_lengths = {} average_length",
"in document_lengths.items()} for term, vector in term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times for",
"gain_vector.append(0) # Step two: DCG (Discounted Cumulated Gain). dcg = [gain_vector[0]] # Put",
"convert original relevance scores to # NDCG-friendly ones. # Constants used in BM25",
"here: https://docs.python.org/3/library/readline.html import json # Used to create a human-readable JSON file for",
"two consecutive hyphens as a space. for term in line.split(): # Split according",
"= \"QUIT\" # When user types `USER_STOP_WORD`, the program ends; it is case-sensitive.",
"# When user types `USER_STOP_WORD`, the program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD =",
"= 0 for line in fp: current_section = line[0 : 2] if current_section",
"\"f8u-3\" (line 35373), # both \"f8u\" and \"f8u3\" will be saved, but not",
"[AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80 # It decides the",
"for character in punctuation) # Remove all punctuations except full stops and hyphens.",
"# Date created: 2018-05-07 # Here are some Python standard modules used in",
"exist a term with an ending hyphens like # \"sub- and\" (line 14632),",
"returned. \"\"\" def add_new_word(word): # A helper function to add a new word",
"query_list = process_queries() query_results = {} # `query_results` structure: {[KEY] query ID :",
"if query == []: query = process_single_query(line) else: query += process_single_query(line) query_list[query_ID] =",
"unlike Python, `dict` type in JSON cannot have `int` key, # therefore a",
"= {} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID : 1})",
"similarities[0 : MOST_RELEVANT] else: return similarities def manual_mode(): \"\"\" When in `manual` mode,",
"pair in query_results[query_ID]: if pair[0] in relevance_set: appearance_times += 1 current_map += appearance_times",
"result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\") rank += 1 def",
"out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default",
"query_results def make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set = set() for pair in",
"to process documents and queries. \"\"\" try: int(word) return True except ValueError: return",
"\".I\", since they are not consecutive. if current_section == WORDS: section = current_section",
"N for Cranfield collection. # N.B.: `N` cannot be larger than `MOST_RELEVANT`. N",
"since they may not be consecutive. num_of_documents += 1 length = 0.0 section",
"/ (n_i + 0.5), 2) similarity += frequency * (1.0 + K) /",
"documents (Ret). retrieval_set = set() for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def",
"1, 2) + dcg[-1]) # Step three: IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector",
"query_ID = int(fields[0]) pair = (int(fields[1]), int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) #",
"(line 25717). if is_valid(element): add_new_word(element) # Filter out all pure integers; for example,",
"be the same as P at N for Cranfield collection. # N.B.: `N`",
"will be saved, but not \"3\". # Calculate the last length since Cranfield",
"+= 1 recall += appearance_times / len(relevance_set) recall = recall / len(query_results) return",
"= lambda x : x[1]) return relevance_scores def make_query_results(): \"\"\" It returns possible",
"from document length vector. del document_lengths[0] average_length = (document_lengths[document_ID] + average_length) / num_of_documents",
"\"\").lower() compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if len(term_split)",
"DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH",
"{1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: # For first-time running, it creates an index",
"like \"..e.g.at\" (line 17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509), # or",
"pairs to dictionary for convienence. # Step one: gain vector. gain_vector = []",
"pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: # For first-time running, it creates",
"* (1.0 + K) / (frequency + K * ((1.0 - B) +",
"B * document_lengths[document_ID])) * idf if similarity > 0.0: # Ignore the one",
"entry for document 0 is also created although # in Cranfield collection, document",
"math.sqrt(length) # Calculate the previous document length and start a new one. #",
"== ID: document_lengths[document_ID] = math.sqrt(length) # Calculate the previous document length and start",
"ID, Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as fp: for line in fp: fields",
"\"two-step\" (line 38037) or # type names like \"75s-t6\" (line 28459) or \"a52b06\"",
"for all queries. \"\"\" precision = 0.0 for query_ID in relevance_scores: relevance_set =",
"+= 1 precision += appearance_times / len(retrieval_set) precision = precision / len(query_results) return",
"N = 10 def is_number(word): \"\"\" A helper function to check if a",
"= 80 # It decides the length of the boundary between two `manual`",
"= 0 current_map = 0.0 for pair in query_results[query_ID]: if pair[0] in relevance_set:",
"return query_list def bm25_similarities(query): \"\"\" It returns a descending list with at most",
"document_ID in retrieval_set: if document_ID in relevance_set: appearance_times += 1 precision += appearance_times",
"# Ignore original query IDs, which is the numbers followed # by \".I\",",
"Remove full stops in one term, used to convert abbreviations # like \"m.i.t.\"",
"term_split: if is_valid(element): add_new_word(element) return query_terms def process_queries(): with open(QUERY_PATH, \"r\") as fp:",
"= make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert a list of pairs to dictionary",
"fields = line.split() query_ID = int(fields[0]) pair = (int(fields[1]), int(fields[2])) if query_ID in",
"of the boundary between two `manual` queries. MOST_RELEVANT = 15 # At most",
"\" \") for character in punctuation) # Remove all punctuations except full stops",
"current_map += appearance_times / pair[1] mean_average_precision += current_map / len(relevance_set) mean_average_precision = mean_average_precision",
"section = current_section continue elif section in CONTENTS: if query == []: query",
"look like [\"sub\", \"\"]. for element in term_split: # Deal with each part",
"Relevance Score)]} with open(RELEVANCE_PATH, \"r\") as fp: for line in fp: fields =",
"check if a string is valid. Used to process documents and queries. \"\"\"",
"to add a new word in `query_terms`. if word not in stemming: stemming[word]",
"`dcg`. for i in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1, 2) +",
"query_results[query_ID]: if pair[0] in relevance_set and pair[1] <= n: appearance_times += 1 p_at_n",
"0.0 section = current_section continue # Update and go to next line immediately.",
"Document ID : [Value] Appearance Times}}. document_lengths = {} average_length = 0.0 num_of_documents",
"end until user types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print",
"/ len(relevance_set) recall = recall / len(query_results) return recall def p_at_n(n): \"\"\" It",
"Python standard modules used in the script. import argparse # Used to parse",
"BIBLIOGRAPHY = \".B\" WORDS = \".W\" LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS]",
"for Cranfield collection. # N.B.: `N` cannot be larger than `MOST_RELEVANT`. N =",
"and its rank [1 - 15]) form; if `FILE NAME` is not given,",
"= 4 # Filter out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD` #",
"case\") parser.add_argument(\"-m\", required = False, choices = [\"manual\", \"evaluation\"], default = \"manual\", help",
"causes an extra empty string is created # and makes term_split look like",
"process_documents(): \"\"\" Build vectors of each term and calculate lengths of each documents.",
"# Split according to whitespace characters and deal with two special cases: #",
"relevance_scores def make_query_results(): \"\"\" It returns possible relevant documents for each query based",
"= \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels",
"form; if `FILE NAME` is not given, the default output file name is",
"in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1]) # Step",
"documents and queries. \"\"\" try: int(word) return True except ValueError: return False def",
"stemmed words are returned. \"\"\" def add_new_word(word): # A helper function to add",
"query == []: query = process_single_query(line) else: query += process_single_query(line) query_list[query_ID] = query",
"1}) stemming = {} term_vectors = {} # `term_vectors` structure: {[Key] Term :",
"= line.replace(\"--\", \" \") # Also, treat two consecutive hyphens as a space.",
"precisions at N for all queries. \"\"\" p_at_n = 0.0 for query_ID in",
"# A helper function to add a new word in `query_terms`. if word",
"finally, yield at most `n` results for each query. yield query_ID, ndcg_at_n[0 :",
"in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 for pair in query_results[query_ID]: if",
"/ num_of_documents for document in document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length # Now",
": appearance_times for ID, appearance_times in vector.items()} nums_of_documents = len(document_lengths) # It is",
"= \"manual\", help = \"mode selection; `manual` mode is chosen by default if",
"\"w\") as fp: for query_ID, pair_list in query_results.items(): for pair in pair_list: fp.write(\"{0}",
"len(query_results) return precision def recall(): \"\"\" It calculates arithmetic mean of recalls for",
"WORDS: section = current_section continue elif section in CONTENTS: if query == []:",
"is, the more relevant the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda",
"score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert a list",
"all pure integers; for example, for \"f8u-3\" (line 35373), # both \"f8u\" and",
"20026) will be converted into integers by just removing dots. # And similarly,",
"It is used in `bm25_similarities()` function. if args.m == \"manual\": manual_mode() elif args.m",
"[\"sub\", \"\"]. for element in term_split: # Deal with each part of compound",
"== WORDS: section = current_section continue elif section in CONTENTS: if query ==",
"each query based on BM25 model. \"\"\" query_list = process_queries() query_results = {}",
"to N of each query separately. \"\"\" for query_ID, score_list in relevance_scores.items(): relevance_set",
"in the dictionary after stemming. compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if",
"in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: # For first-time running, it",
"\"i. e.\" (line 11820) will be ignored. # \"r.m.s.\" (line 20241) will become",
"to create a typing history buffer for `manual` mode. # More details are",
"buffer for `manual` mode. # More details are here: https://docs.python.org/3/library/readline.html import json #",
"/ pair[1]) if len(ndcg_at_n) > n: # And finally, yield at most `n`",
"in fp: stop_words.add(line.rstrip()) return stop_words def process_documents(): \"\"\" Build vectors of each term",
"dictionary after stemming. compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if section ==",
"names like \"75s-t6\" (line 28459) or \"a52b06\" (line 25717). if is_valid(element): add_new_word(element) #",
"hyphens. args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\") with open(INDEX_PATH,",
"= {} # `term_vectors` structure: {[Key] Term : [Value] {[Key] Document ID :",
"if __name__ == \"__main__\": stemmer = porter.PorterStemmer() stop_words = load_stop_words() punctuation = string.punctuation[0",
"for pair in query_results[query_ID]: if pair[0] in relevance_set and pair[1] <= n: appearance_times",
"= dict(score_list) # Convert a list of pairs to dictionary for convienence. #",
"of each term and calculate lengths of each documents. Also a dictionary containing",
"= mean_average_precision / len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\" It yields a list",
"stemming[word] if stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms = [] query = query.strip()",
"of document IDs for each query. else: relevance_scores[query_ID] = [pair] for query_ID in",
"N and NDCG at N. # If `MOST_RELEVANT` is equal to `N`, precision",
"def add_new_word(word): # A helper function to add a new word in `query_terms`.",
"range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1]) # Step three:",
"elif args.m == \"evaluation\": relevance_scores = load_relevance_scores() query_results = make_query_results() print_evaluation_results() if args.o",
"therefore a conversion is of necessity. document_lengths = {int(ID) : length for ID,",
"relevance_set: appearance_times += 1 precision += appearance_times / len(retrieval_set) precision = precision /",
"relevance_set: appearance_times += 1 current_map += appearance_times / pair[1] mean_average_precision += current_map /",
"cannot be larger than `MOST_RELEVANT`. N = 10 def is_number(word): \"\"\" A helper",
"and deal with two special cases: # abbreviations with \".\" and hyphenated compounds.",
"function to add a new word in `query_terms`. if word not in stemming:",
"separately. \"\"\" for query_ID, score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list)",
"del query_list[0] # Skip the first one. return query_list def bm25_similarities(query): \"\"\" It",
"(document_lengths[document_ID] + average_length) / num_of_documents for document in document_lengths.keys(): document_lengths[document] = document_lengths[document] /",
"average_length) / num_of_documents for document in document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length #",
"and stemmed words are returned. \"\"\" def add_new_word(word): # A helper function to",
"= \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels in `cran.all.1400` and `cranqrel` text files.",
"for query_ID in query_list: rank = 1 query_results[query_ID] = [] for pair in",
"default if it is not specified\") parser.add_argument(\"-o\", required = False, nargs = \"?\",",
"# like \"m.i.t.\" (line 1222) / \"u.s.a.\" (line 32542) into \"mit\" / \"usa\".",
"(line 1222) / \"u.s.a.\" (line 32542) into \"mit\" / \"usa\". # In the",
"elif section in CONTENTS: if query == []: query = process_single_query(line) else: query",
"based on BM25 model. \"\"\" query_list = process_queries() query_results = {} # `query_results`",
"list with at most top `MOST_RELEVANT` pairs (Document ID, Similarity) based on BM25",
"in CONTENTS: line = line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \") # Also, treat",
"True else: return False def get_arguments(): parser = argparse.ArgumentParser(description = \"A script used",
"length for each document. return stemming, term_vectors, document_lengths def process_single_query(query): \"\"\" Process single",
"\"a52b06\" (line 25717). if is_valid(element): add_new_word(element) # Filter out all pure integers; for",
"process_single_query(line) else: query += process_single_query(line) query_list[query_ID] = query # Add the last entry.",
"\"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end =",
"in term_vectors and document_ID in term_vectors[term]: frequency = (term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf",
"CONTENTS: line = line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \") # Also, treat two",
"`-o` option is available. with open(args.o, \"w\") as fp: for query_ID, pair_list in",
"N and # NDCG at N) are applied. # Tested under Python 3.5",
"output in lines of 3-tuples (query ID, document ID, and its rank [1",
"p_at_n += appearance_times / n p_at_n = p_at_n / len(query_results) return p_at_n def",
"The default value is 4 (-1, 1, 2, 3, 4), which means all",
"term_vectors[stemmed_word].update({document_ID : 1}) stemming = {} term_vectors = {} # `term_vectors` structure: {[Key]",
"implement the BM25 alogrithm information retrieval; # also 5 evaluation methods (precision, recall,",
"stemmed_word = stemming[word] if stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms = [] query",
"return relevance_set def make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set = set() for pair",
"removing dots. # And similarly, phrases like \"m. i. t.\" (line 36527) and",
"0 from document length vector. del document_lengths[0] average_length = (document_lengths[document_ID] + average_length) /",
"for pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: # For first-time",
"if current_section in LABELS: if current_section == ID: query_list[query_ID] = query query =",
"list of NDCGs at up to N of each query separately. \"\"\" for",
"fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else: # For first-time running, it creates an",
"\"\"\" def add_new_word(word): # A helper function to add a new word in",
"relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only include queries whose relevance scores",
"= (int(fields[1]), int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no repetition",
"# \"i. e.\" (line 11820) will be ignored. # \"r.m.s.\" (line 20241) will",
"\"75s-t6\" (line 28459) or \"a52b06\" (line 25717). if is_valid(element): add_new_word(element) # Filter out",
"Used to parse program arguments. # More details are here: https://docs.python.org/3/library/argparse.html import readline",
"{0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end",
"`MOST_RELEVANT` is equal to `N`, precision will be the same as P at",
"+ B * document_lengths[document_ID])) * idf if similarity > 0.0: # Ignore the",
"less the relevance # score is, the more relevant the document is. relevance_scores[query_ID]",
"Python libraries that places locally. import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\"",
"are not consecutive. if current_section == WORDS: section = current_section continue elif section",
"(line 11820) will be ignored. # \"r.m.s.\" (line 20241) will become \"rm\" stored",
"for term in query.split(): term = term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\") if",
"into integers by just removing dots. # And similarly, phrases like \"m. i.",
"begins from 001. average_length += document_lengths[document_ID] document_ID += 1 # Ignore original document",
"= sorted(relevance_scores[query_ID], key = lambda x : x[1]) return relevance_scores def make_query_results(): \"\"\"",
"/ pair[1] mean_average_precision += current_map / len(relevance_set) mean_average_precision = mean_average_precision / len(query_results) return",
"add a new word in `query_terms`. if word not in stemming: stemming[word] =",
"args.m == \"manual\": manual_mode() elif args.m == \"evaluation\": relevance_scores = load_relevance_scores() query_results =",
"not end until user types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) #",
"2) similarity += frequency * (1.0 + K) / (frequency + K *",
"num_of_documents = 0 with open(DOCUMENT_PATH, \"r\") as fp: document_ID = 0 length =",
"(line 32542) into \"mit\" / \"usa\". # In the meantime, something like \"..e.g.at\"",
"(document_ID, similarity) similarities.append(pair) # Sort results in desceding order. similarities = sorted(similarities, key",
"stops and hyphens. args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\")",
"in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)):",
"32542) into \"mit\" / \"usa\". # In the meantime, something like \"..e.g.at\" (line",
"Process single line text. Used by `process_queries` function and `manual` mode. \"\"\" def",
"type `python3 bm25.py` to generate one in the working directory and extra arguments",
"query IDs, which is the numbers followed # by \".I\", since they are",
"to build BM25 model and relative evaluation methods. If the index JSON file",
"relevance_set: if document_ID in retrieval_set: appearance_times += 1 recall += appearance_times / len(relevance_set)",
"of 3-tuples (query ID, document ID, and its rank [1 - 15]) form;",
"in the script. import argparse # Used to parse program arguments. # More",
"len(similarities) > MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else: return similarities def manual_mode(): \"\"\"",
"if is_valid(compound): add_new_word(compound) if section == WORDS: length += 1.0 # Treat a",
"not is_number(word): return True else: return False def get_arguments(): parser = argparse.ArgumentParser(description =",
"terminate): \") if user_query == USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results for query",
"import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH =",
"an integer. Used to process documents and queries. \"\"\" try: int(word) return True",
"with \".\" and hyphenated compounds. term = term.replace(\".\", \"\") # Remove full stops",
"as `relevance_scores`. for query_ID in query_list: rank = 1 query_results[query_ID] = [] for",
"stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in term_vectors: term_vectors[stemmed_word] = {} if",
"empty string is created # and makes term_split look like [\"sub\", \"\"]. for",
"{} term_vectors = {} # `term_vectors` structure: {[Key] Term : [Value] {[Key] Document",
"query.replace(\"--\", \" \") for term in query.split(): term = term.replace(\".\", \"\").lower() compound =",
"`RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set =",
"math.log(i + 1, 2) + idcg[-1]) # Step four: NDCG (Normalised Discounted Cumulated",
"\".B\" WORDS = \".W\" LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS =",
"out all pure integers; for example, for \"f8u-3\" (line 35373), # both \"f8u\"",
"vector. gain_vector = [] for pair in query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX",
"relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 current_map = 0.0 for pair in",
"one term, used to convert abbreviations # like \"m.i.t.\" (line 1222) / \"u.s.a.\"",
"and relative evaluation methods. If the index JSON file is not available, just",
"as fp: for query_ID, pair_list in query_results.items(): for pair in pair_list: fp.write(\"{0} {1}",
"= term.split(\"-\") if len(term_split) > 1: for element in term_split: if is_valid(element): add_new_word(element)",
"with at most top `MOST_RELEVANT` pairs (Document ID, Similarity) based on BM25 to",
"\"\"\" mean_average_precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times =",
"return similarities def manual_mode(): \"\"\" When in `manual` mode, the function will not",
"ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] /",
"if current_section in LABELS: if current_section == ID: document_lengths[document_ID] = math.sqrt(length) # Calculate",
"/ math.log(i + 1, 2) + idcg[-1]) # Step four: NDCG (Normalised Discounted",
"lines of 3-tuples (query ID, document ID, and its rank [1 - 15])",
"math.log(i + 1, 2) + dcg[-1]) # Step three: IDCG (Ideal Discounted Cumulated",
"def precision(): \"\"\" It calculates arithmetic mean of precisions for all queries. \"\"\"",
"coding: utf-8 -*- # Description: Build a structural data from orginial Cranfield collection",
"= True) if len(similarities) > MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else: return similarities",
"= \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH =",
"at N for Cranfield collection. # N.B.: `N` cannot be larger than `MOST_RELEVANT`.",
"pair in query_results[query_ID]: if pair[0] in relevance_set and pair[1] <= n: appearance_times +=",
"end = \"\\n\") if __name__ == \"__main__\": stemmer = porter.PorterStemmer() stop_words = load_stop_words()",
"{} # `term_vectors` structure: {[Key] Term : [Value] {[Key] Document ID : [Value]",
"the function will not end until user types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL",
"an extra empty string is created # and makes term_split look like [\"sub\",",
"stemming[word] if stemmed_word not in term_vectors: term_vectors[stemmed_word] = {} if document_ID in term_vectors[stemmed_word]:",
"line[0 : 2] if current_section in LABELS: if current_section == ID: query_list[query_ID] =",
"after stemming. compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if section == WORDS:",
"desceding order. similarities = sorted(similarities, key = lambda x : x[1], reverse =",
"= len(term_vectors[term]) idf = math.log((nums_of_documents - n_i + 0.5) / (n_i + 0.5),",
"ID : [Value] [(Document ID, Relevance Score)]}, which is exactly the same structure",
"= recall / len(query_results) return recall def p_at_n(n): \"\"\" It calculates arithmetic mean",
"= \"mode selection; `manual` mode is chosen by default if it is not",
"\"\"\" A helper function to check if a string can be converted to",
"query. yield query_ID, ndcg_at_n[0 : n] else: yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation",
"((1.0 - B) + B * document_lengths[document_ID])) * idf if similarity > 0.0:",
"will be converted into integers by just removing dots. # And similarly, phrases",
"It returns a descending list with at most top `MOST_RELEVANT` pairs (Document ID,",
"helper function to check if a string can be converted to an integer.",
"ndcg_at_n[0 : n] else: yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()),",
"valid. Used to process documents and queries. \"\"\" if word != \"\" and",
"0 for document_ID in relevance_set: if document_ID in retrieval_set: appearance_times += 1 recall",
"according to whitespace characters and deal with two special cases: # abbreviations with",
"there is no hyphen in this word. # There may exist a term",
"in LABELS: if current_section == ID: query_list[query_ID] = query query = [] query_ID",
"18799), \"79.5degree\" # (line 20026) will be converted into integers by just removing",
"with an ending hyphens like # \"sub- and\" (line 14632), which causes an",
"query.split(): term = term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split",
"for pair in query_results[query_ID]: if pair[0] in relevance_set: appearance_times += 1 current_map +=",
"key = lambda x : x[1]) return relevance_scores def make_query_results(): \"\"\" It returns",
"load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH, \"r\") as fp: for line in fp:",
"= math.log((nums_of_documents - n_i + 0.5) / (n_i + 0.5), 2) similarity +=",
"if similarity > 0.0: # Ignore the one with similarity score 0. pair",
"= \"\\n\") for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg),",
"bm25.py` to generate one in the working directory and extra arguments will be",
"38037) or # type names like \"75s-t6\" (line 28459) or \"a52b06\" (line 25717).",
"`n` results for each query. yield query_ID, ndcg_at_n[0 : n] else: yield query_ID,",
"returned for each query. USER_STOP_WORD = \"QUIT\" # When user types `USER_STOP_WORD`, the",
"pure integers; for example, for \"f8u-3\" (line 35373), # both \"f8u\" and \"f8u3\"",
"calculate similarities. \"\"\" similarities = [] for document_ID in range(1, nums_of_documents + 1):",
"len(query_results) return p_at_n def mean_average_precision(): \"\"\" It calculates mean average precision for all",
"make_query_results(): \"\"\" It returns possible relevant documents for each query based on BM25",
"len(term_split) > 1: # If only one item in `term_split`, which means there",
"For first-time running, it creates an index JSON file and exit. print(\"[Generating the",
"in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1]) # Step",
"the meantime, something like \"..e.g.at\" (line 17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line",
"example, for \"f8u-3\" (line 35373), # both \"f8u\" and \"f8u3\" will be saved,",
"score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i +",
"query. USER_STOP_WORD = \"QUIT\" # When user types `USER_STOP_WORD`, the program ends; it",
"compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if section == WORDS: length +=",
"len(query_results) return recall def p_at_n(n): \"\"\" It calculates arithmetic mean of precisions at",
"in document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length # Now document_lengths stores a normalised",
"load_relevance_scores(): relevance_scores = {} # `relevance_scores` structure: {[KEY] query ID : [Value] [(Document",
"BM25 to calculate similarities. \"\"\" similarities = [] for document_ID in range(1, nums_of_documents",
"document. return stemming, term_vectors, document_lengths def process_single_query(query): \"\"\" Process single line text. Used",
"36527) and # \"i. e.\" (line 11820) will be ignored. # \"r.m.s.\" (line",
"in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert a list of",
"to terminate): \") if user_query == USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results for",
"precision(): \"\"\" It calculates arithmetic mean of precisions for all queries. \"\"\" precision",
"n_i + 0.5) / (n_i + 0.5), 2) similarity += frequency * (1.0",
"yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall:",
"appearance_times in vector.items()} nums_of_documents = len(document_lengths) # It is used in `bm25_similarities()` function.",
"/ len(query_results) return precision def recall(): \"\"\" It calculates arithmetic mean of recalls",
"And finally, yield at most `n` results for each query. yield query_ID, ndcg_at_n[0",
"\"\"\" Process single line text. Used by `process_queries` function and `manual` mode. \"\"\"",
"Discounted Cumulated Gain). ideal_gain_vector = [] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]])",
"NDCG-usable scores. else: gain_vector.append(0) # Step two: DCG (Discounted Cumulated Gain). dcg =",
"BM25 model. K = 1.0 B = 0.75 # A constant used in",
"same structure and length as `relevance_scores`. for query_ID in query_list: rank = 1",
"document_lengths[0] average_length = (document_lengths[document_ID] + average_length) / num_of_documents for document in document_lengths.keys(): document_lengths[document]",
"case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter out ones with relevance score larger than",
"A helper function to check if a string can be converted to an",
"queries. \"\"\" if word != \"\" and word not in stop_words and not",
"if pair[0] in relevance_set and pair[1] <= n: appearance_times += 1 p_at_n +=",
"= [pair] for query_ID in relevance_scores: # Sort pairs in ascending order for",
"numbers followed # by \".I\", since they are not consecutive. if current_section ==",
"is no hyphen in this word. # There may exist a term with",
"if args.m == \"manual\": manual_mode() elif args.m == \"evaluation\": relevance_scores = load_relevance_scores() query_results",
"types \"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to",
"1): # Document ID begins from 1. similarity = 0.0 for term in",
"which means all # documents in it will be reserved. RELEVANCE_SCORE_FIX = 5",
"# Constants used in BM25 model. K = 1.0 B = 0.75 #",
"\"\"]. for element in term_split: # Deal with each part of compound words",
"[pair] for query_ID in relevance_scores: # Sort pairs in ascending order for each",
"term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming = {} term_vectors =",
"a descending list with at most top `MOST_RELEVANT` pairs (Document ID, Similarity) based",
"query = process_single_query(line) else: query += process_single_query(line) query_list[query_ID] = query # Add the",
"\") if user_query == USER_STOP_WORD: break query_terms = process_single_query(user_query) print(\"Results for query \"",
"which means there is no hyphen in this word. # There may exist",
"Deal with each part of compound words like \"two-step\" (line 38037) or #",
"by \".I\", since they are not consecutive. if current_section == WORDS: section =",
"in LABELS: if current_section == ID: document_lengths[document_ID] = math.sqrt(length) # Calculate the previous",
"It calculates arithmetic mean of recalls for all queries. \"\"\" recall = 0.0",
"in BM25 model. K = 1.0 B = 0.75 # A constant used",
"11820) will be ignored. # \"r.m.s.\" (line 20241) will become \"rm\" stored in",
"\"studies.dash\" (line 516) will not be handled as expected. # All float-point numbers",
"\"evaluation\"], default = \"manual\", help = \"mode selection; `manual` mode is chosen by",
"break query_terms = process_single_query(user_query) print(\"Results for query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank =",
"option is available. with open(args.o, \"w\") as fp: for query_ID, pair_list in query_results.items():",
"ignored. # \"r.m.s.\" (line 20241) will become \"rm\" stored in the dictionary after",
"are here: https://docs.python.org/3/library/readline.html import json # Used to create a human-readable JSON file",
"results are returned for each query. USER_STOP_WORD = \"QUIT\" # When user types",
"not available, just type `python3 bm25.py` to generate one in the working directory",
"yields a list of NDCGs at up to N of each query separately.",
"also created although # in Cranfield collection, document ID begins from 001. average_length",
"Ignore the one with similarity score 0. pair = (document_ID, similarity) similarities.append(pair) #",
"to dictionary for convienence. # Step one: gain vector. gain_vector = [] for",
"Also a dictionary containing pairs of original words and stemmed words are returned.",
"return precision def recall(): \"\"\" It calculates arithmetic mean of recalls for all",
"some Python standard modules used in the script. import argparse # Used to",
"of NDCGs at up to N of each query separately. \"\"\" for query_ID,",
"Used to do some regex operations. import math import os # Here are",
"1 # Ignore original query IDs, which is the numbers followed # by",
"1 query_results[query_ID] = [] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1",
"if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\") with open(INDEX_PATH, \"r\") as fp: stemming,",
": length for ID, length in document_lengths.items()} for term, vector in term_vectors.items(): term_vectors[term]",
"appearance_times / n p_at_n = p_at_n / len(query_results) return p_at_n def mean_average_precision(): \"\"\"",
"= argparse.ArgumentParser(description = \"A script used to build BM25 model and relative evaluation",
"ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) > n: # And finally, yield at most",
"It calculates arithmetic mean of precisions at N for all queries. \"\"\" p_at_n",
"be converted to an integer. Used to process documents and queries. \"\"\" try:",
"else: query += process_single_query(line) query_list[query_ID] = query # Add the last entry. del",
"term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if section == WORDS: length += 1.0 #",
"Ignore original query IDs, which is the numbers followed # by \".I\", since",
"in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set def precision(): \"\"\" It calculates arithmetic mean of",
"places locally. import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\"",
"the less the relevance # score is, the more relevant the document is.",
"similarities def manual_mode(): \"\"\" When in `manual` mode, the function will not end",
"fp: fields = line.split() query_ID = int(fields[0]) pair = (int(fields[1]), int(fields[2])) if query_ID",
"3, 4), which means all # documents in it will be reserved. RELEVANCE_SCORE_FIX",
"term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times in vector.items()} nums_of_documents = len(document_lengths)",
"document_ID = 0 length = 0.0 for line in fp: current_section = line[0",
"one: gain vector. gain_vector = [] for pair in query_results[query_ID]: if pair[0] in",
"\"mode selection; `manual` mode is chosen by default if it is not specified\")",
"scores to NDCG-usable scores. else: gain_vector.append(0) # Step two: DCG (Discounted Cumulated Gain).",
"`int` key, # therefore a conversion is of necessity. document_lengths = {int(ID) :",
"human-readable JSON file for index information and the like. import string # Used",
"\"\"\" When in `manual` mode, the function will not end until user types",
"default output file name is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words = set()",
"else: # For first-time running, it creates an index JSON file and exit.",
"in query.split(): term = term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound)",
"x : x[1]) return relevance_scores def make_query_results(): \"\"\" It returns possible relevant documents",
"(term_vectors[term])[document_ID] n_i = len(term_vectors[term]) idf = math.log((nums_of_documents - n_i + 0.5) / (n_i",
"details are here: https://docs.python.org/3/library/argparse.html import readline # Used to create a typing history",
"ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg), end = \"\\n\") if",
"1, 2) + idcg[-1]) # Step four: NDCG (Normalised Discounted Cumulated Gain) at",
"3.5 on Ubuntu 16.04. # Author: '(<NAME>.) # Date created: 2018-05-07 # Here",
"be handled as expected. # All float-point numbers like \"3.2x10\" (line 18799), \"79.5degree\"",
"# -*- coding: utf-8 -*- # Description: Build a structural data from orginial",
"ID, length in document_lengths.items()} for term, vector in term_vectors.items(): term_vectors[term] = {int(ID) :",
"vector.items()} nums_of_documents = len(document_lengths) # It is used in `bm25_similarities()` function. if args.m",
"+ dcg[-1]) # Step three: IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector = []",
"string can be converted to an integer. Used to process documents and queries.",
"(n_i + 0.5), 2) similarity += frequency * (1.0 + K) / (frequency",
"are here: https://docs.python.org/3/library/argparse.html import readline # Used to create a typing history buffer",
"return query_results def make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set = set() for pair",
"\"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\" (line 516) will not be handled as",
"P at N for Cranfield collection. # N.B.: `N` cannot be larger than",
"or # type names like \"75s-t6\" (line 28459) or \"a52b06\" (line 25717). if",
"\"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean",
"dict((ord(character), \" \") for character in punctuation) # Remove all punctuations except full",
"of pairs to dictionary for convienence. # Step one: gain vector. gain_vector =",
"hyphens as a space. for term in line.split(): # Split according to whitespace",
"user_query = input(\"Enter query (type \\\"QUIT\\\" to terminate): \") if user_query == USER_STOP_WORD:",
"original relevance scores to # NDCG-friendly ones. # Constants used in BM25 model.",
"current_section == ID: query_list[query_ID] = query query = [] query_ID += 1 #",
"but not \"3\". # Calculate the last length since Cranfield collection does not",
"is not None: # If `-o` option is available. with open(args.o, \"w\") as",
"in `dcg`. for i in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1, 2)",
"False, choices = [\"manual\", \"evaluation\"], default = \"manual\", help = \"mode selection; `manual`",
"line[0 : 2] if current_section in LABELS: if current_section == ID: document_lengths[document_ID] =",
"# \"r.m.s.\" (line 20241) will become \"rm\" stored in the dictionary after stemming.",
"at N and NDCG at N. # If `MOST_RELEVANT` is equal to `N`,",
"<Query {1}>: {2}\".format(N, query_ID, ndcg), end = \"\\n\") if __name__ == \"__main__\": stemmer",
"to an integer. Used to process documents and queries. \"\"\" try: int(word) return",
"index from file.]\") with open(INDEX_PATH, \"r\") as fp: stemming, term_vectors, document_lengths = json.load(fp)",
"# Used to create a human-readable JSON file for index information and the",
"element in term_split: # Deal with each part of compound words like \"two-step\"",
"= make_retrieval_set(query_ID) appearance_times = 0 for document_ID in relevance_set: if document_ID in retrieval_set:",
"= {} average_length = 0.0 num_of_documents = 0 with open(DOCUMENT_PATH, \"r\") as fp:",
"# Ignore original document IDs, which is the numbers followed by \".I\", #",
"document_ID in range(1, nums_of_documents + 1): # Document ID begins from 1. similarity",
"the more relevant the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x",
"= make_query_results() print_evaluation_results() if args.o is not None: # If `-o` option is",
"on Ubuntu 16.04. # Author: '(<NAME>.) # Date created: 2018-05-07 # Here are",
"print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\") rank += 1 def load_relevance_scores(): relevance_scores =",
"ID, document ID, and its rank [1 - 15]) form; if `FILE NAME`",
"Retrieval documents (Ret). retrieval_set = set() for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return retrieval_set",
"constant used in Precision at N and NDCG at N. # If `MOST_RELEVANT`",
"n p_at_n = p_at_n / len(query_results) return p_at_n def mean_average_precision(): \"\"\" It calculates",
"If the index JSON file is not available, just type `python3 bm25.py` to",
"that places locally. import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH =",
"\"u.s.a.\" (line 32542) into \"mit\" / \"usa\". # In the meantime, something like",
"with two special cases: # abbreviations with \".\" and hyphenated compounds. term =",
"Precision at N and NDCG at N. # If `MOST_RELEVANT` is equal to",
"\"__main__\": stemmer = porter.PorterStemmer() stop_words = load_stop_words() punctuation = string.punctuation[0 : 12] +",
"in query_list: rank = 1 query_results[query_ID] = [] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0],",
"# Remove all punctuations except full stops and hyphens. args = get_arguments() if",
"return p_at_n def mean_average_precision(): \"\"\" It calculates mean average precision for all queries.",
"which is exactly the same structure and length as `relevance_scores`. for query_ID in",
"(Document ID, Similarity) based on BM25 to calculate similarities. \"\"\" similarities = []",
"# A constant used in Precision at N and NDCG at N. #",
"assumes no repetition of document IDs for each query. else: relevance_scores[query_ID] = [pair]",
"`FILE NAME` is not given, the default output file name is `evaluation_output.txt`\") return",
"since Cranfield collection does not have ending symbols. document_lengths[document_ID] = math.sqrt(length) # Skip",
"= query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \") for term in query.split(): term =",
"conversion is of necessity. document_lengths = {int(ID) : length for ID, length in",
"ndcg_at_n(n): \"\"\" It yields a list of NDCGs at up to N of",
"for pair in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only include queries",
"relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in relevance_set:",
"for query_ID in relevance_scores: # Sort pairs in ascending order for each query;",
"and # implement the BM25 alogrithm information retrieval; # also 5 evaluation methods",
"empty entry for document 0 is also created although # in Cranfield collection,",
"[Value] Appearance Times}}. document_lengths = {} average_length = 0.0 num_of_documents = 0 with",
"term, vector in term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times in",
"# width of terminal window. user_query = input(\"Enter query (type \\\"QUIT\\\" to terminate):",
"10 def is_number(word): \"\"\" A helper function to check if a string can",
"calculates arithmetic mean of recalls for all queries. \"\"\" recall = 0.0 for",
"BOUNDARY_LENGTH = 80 # It decides the length of the boundary between two",
"= [] query_ID = 0 for line in fp: current_section = line[0 :",
"TITLE = \".T\" AUTHORS = \".A\" BIBLIOGRAPHY = \".B\" WORDS = \".W\" LABELS",
"set() with open(STOP_WORDS_PATH, \"r\") as fp: for line in fp: stop_words.add(line.rstrip()) return stop_words",
"\".A\" BIBLIOGRAPHY = \".B\" WORDS = \".W\" LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY,",
"no repetition of document IDs for each query. else: relevance_scores[query_ID] = [pair] for",
"word not in stop_words and not is_number(word): return True else: return False def",
"in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original ranking scores to NDCG-usable scores.",
"from orginial Cranfield collection and # implement the BM25 alogrithm information retrieval; #",
"for term in line.split(): # Split according to whitespace characters and deal with",
"word != \"\" and word not in stop_words and not is_number(word): return True",
"one word; words in `AUTHORS` # and `BIBLIOGRAPHY` section will not be counted.",
"= term.replace(\".\", \"\") # Remove full stops in one term, used to convert",
"= process_queries() query_results = {} # `query_results` structure: {[KEY] query ID : [Value]",
"# Step three: IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector = [] for pair",
"[ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) +",
"structure and length as `relevance_scores`. for query_ID in query_list: rank = 1 query_results[query_ID]",
"\" \") for term in query.split(): term = term.replace(\".\", \"\").lower() compound = term.replace(\"-\",",
"is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH, \"r\") as",
"= [] query = query.strip() query = query.translate(removing_punctuation_map) query = query.replace(\"--\", \" \")",
"details are here: https://docs.python.org/3/library/readline.html import json # Used to create a human-readable JSON",
"A helper function to add a new word in `term_vectors`. if word not",
"It assumes no repetition of document IDs for each query. else: relevance_scores[query_ID] =",
"= [] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1 return query_results",
"relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in",
"abbreviations with \".\" and hyphenated compounds. term = term.replace(\".\", \"\") # Remove full",
"add_new_word(element) # Filter out all pure integers; for example, for \"f8u-3\" (line 35373),",
"= 0.0 for term in query: if term in term_vectors and document_ID in",
"or equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): # Retrieval",
"relevance_set def make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set = set() for pair in",
"= term.replace(\".\", \"\").lower() compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\")",
"whitespace characters and deal with two special cases: # abbreviations with \".\" and",
"(line 17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\" (line",
"precision += appearance_times / len(retrieval_set) precision = precision / len(query_results) return precision def",
"query \" + str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank),",
"in this word. # There may exist a term with an ending hyphens",
"<= RELEVANCE_SCORE_THRESHOLD: # We only include queries whose relevance scores are less than",
"# If `MOST_RELEVANT` is equal to `N`, precision will be the same as",
"return parser.parse_args() def load_stop_words(): stop_words = set() with open(STOP_WORDS_PATH, \"r\") as fp: for",
"16.04. # Author: '(<NAME>.) # Date created: 2018-05-07 # Here are some Python",
"\"manual\", help = \"mode selection; `manual` mode is chosen by default if it",
"= [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL =",
"DCG (Discounted Cumulated Gain). dcg = [gain_vector[0]] # Put the first item in",
"line = line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \") # Also, treat two consecutive",
"IDs for each query. else: relevance_scores[query_ID] = [pair] for query_ID in relevance_scores: #",
"a list of NDCGs at up to N of each query separately. \"\"\"",
"= sorted(similarities, key = lambda x : x[1], reverse = True) if len(similarities)",
"1 length = 0.0 section = current_section continue # Update and go to",
"followed # by \".I\", since they are not consecutive. if current_section == WORDS:",
"else: relevance_scores[query_ID] = [pair] for query_ID in relevance_scores: # Sort pairs in ascending",
"relevant the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x : x[1])",
"It calculates arithmetic mean of precisions for all queries. \"\"\" precision = 0.0",
"len(retrieval_set) precision = precision / len(query_results) return precision def recall(): \"\"\" It calculates",
"1 current_map += appearance_times / pair[1] mean_average_precision += current_map / len(relevance_set) mean_average_precision =",
"the BM25 alogrithm information retrieval; # also 5 evaluation methods (precision, recall, MAP,",
"the boundary between two `manual` queries. MOST_RELEVANT = 15 # At most top",
"appearance_times += 1 current_map += appearance_times / pair[1] mean_average_precision += current_map / len(relevance_set)",
"document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x : x[1]) return relevance_scores",
"relative evaluation methods. If the index JSON file is not available, just type",
"working directory and extra arguments will be ignored in this case\") parser.add_argument(\"-m\", required",
"# Deal with each part of compound words like \"two-step\" (line 38037) or",
"new one. # The empty entry for document 0 is also created although",
"be consecutive. num_of_documents += 1 length = 0.0 section = current_section continue #",
"+= appearance_times / n p_at_n = p_at_n / len(query_results) return p_at_n def mean_average_precision():",
"Now document_lengths stores a normalised length for each document. return stemming, term_vectors, document_lengths",
"pair in query_results[query_ID]: if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original",
"for each query. else: relevance_scores[query_ID] = [pair] for query_ID in relevance_scores: # Sort",
"# Filter out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`.",
"information retrieval; # also 5 evaluation methods (precision, recall, MAP, P at N",
"2] if current_section in LABELS: if current_section == ID: query_list[query_ID] = query query",
"not consecutive. if current_section == WORDS: section = current_section continue elif section in",
"`bm25_similarities()` function. if args.m == \"manual\": manual_mode() elif args.m == \"evaluation\": relevance_scores =",
"ID, and its rank [1 - 15]) form; if `FILE NAME` is not",
"# Calculate the last length since Cranfield collection does not have ending symbols.",
"precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID)",
"dcg = [gain_vector[0]] # Put the first item in `dcg`. for i in",
"will not be counted. term_split = term.split(\"-\") if len(term_split) > 1: # If",
"input(\"Enter query (type \\\"QUIT\\\" to terminate): \") if user_query == USER_STOP_WORD: break query_terms",
"with open(args.o, \"w\") as fp: for query_ID, pair_list in query_results.items(): for pair in",
"for each query. USER_STOP_WORD = \"QUIT\" # When user types `USER_STOP_WORD`, the program",
"- score_list_dict[pair[0]]) idcg = [ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i",
"\"FILE NAME\", help = \"BM25 evaluation result output in lines of 3-tuples (query",
"+= 1 # Ignore original query IDs, which is the numbers followed #",
"rank += 1 return query_results def make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set =",
"[\"manual\", \"evaluation\"], default = \"manual\", help = \"mode selection; `manual` mode is chosen",
"section in CONTENTS: line = line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \") # Also,",
"# More details are here: https://docs.python.org/3/library/argparse.html import readline # Used to create a",
"len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1]) # Step four: NDCG",
"1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\") rank +=",
"the working directory and extra arguments will be ignored in this case\") parser.add_argument(\"-m\",",
"= [] for document_ID in range(1, nums_of_documents + 1): # Document ID begins",
"most `n` results for each query. yield query_ID, ndcg_at_n[0 : n] else: yield",
"repetition of document IDs for each query. else: relevance_scores[query_ID] = [pair] for query_ID",
"+= frequency * (1.0 + K) / (frequency + K * ((1.0 -",
"and `cranqrel` text files. ID = \".I\" TITLE = \".T\" AUTHORS = \".A\"",
"WORDS = \".W\" LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS,",
"function to check if a string is valid. Used to process documents and",
"queries. \"\"\" mean_average_precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times",
"# Sort results in desceding order. similarities = sorted(similarities, key = lambda x",
"2] if current_section in LABELS: if current_section == ID: document_lengths[document_ID] = math.sqrt(length) #",
"phrases like \"m. i. t.\" (line 36527) and # \"i. e.\" (line 11820)",
"descending list with at most top `MOST_RELEVANT` pairs (Document ID, Similarity) based on",
"function and `manual` mode. \"\"\" def add_new_word(word): # A helper function to add",
"for line in fp: current_section = line[0 : 2] if current_section in LABELS:",
"int(word) return True except ValueError: return False def is_valid(word): \"\"\" A helper function",
"/ average_length # Now document_lengths stores a normalised length for each document. return",
"= load_stop_words() punctuation = string.punctuation[0 : 12] + string.punctuation[14:] removing_punctuation_map = dict((ord(character), \"",
"JSON file for index information and the like. import string # Used to",
"relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert a list of pairs",
"if word != \"\" and word not in stop_words and not is_number(word): return",
"1, 2, 3, 4), which means all # documents in it will be",
"word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not",
"given, the default output file name is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words(): stop_words",
"{} # `query_results` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]},",
"= make_relevance_set(query_ID) appearance_times = 0 for pair in query_results[query_ID]: if pair[0] in relevance_set",
"relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original ranking scores to NDCG-usable scores. else:",
"term_vectors[stemmed_word] = {} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID :",
"# In the meantime, something like \"..e.g.at\" (line 17393), # \"i.e.it\" (line 17287),",
"query_list[query_ID] = query # Add the last entry. del query_list[0] # Skip the",
": x[1]) return relevance_scores def make_query_results(): \"\"\" It returns possible relevant documents for",
"precision = precision / len(query_results) return precision def recall(): \"\"\" It calculates arithmetic",
"80 # It decides the length of the boundary between two `manual` queries.",
"may exist a term with an ending hyphens like # \"sub- and\" (line",
"pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) > n: # And",
"line in fp: current_section = line[0 : 2] if current_section in LABELS: if",
"appearance_times = 0 for pair in query_results[query_ID]: if pair[0] in relevance_set and pair[1]",
"json.load(fp) # Warning: unlike Python, `dict` type in JSON cannot have `int` key,",
"0.0 for pair in query_results[query_ID]: if pair[0] in relevance_set: appearance_times += 1 current_map",
"== ID: query_list[query_ID] = query query = [] query_ID += 1 # Ignore",
"default # width of terminal window. user_query = input(\"Enter query (type \\\"QUIT\\\" to",
"More details are here: https://docs.python.org/3/library/argparse.html import readline # Used to create a typing",
"by `process_queries` function and `manual` mode. \"\"\" def add_new_word(word): # A helper function",
"print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0}",
"False def is_valid(word): \"\"\" A helper function to check if a string is",
"argparse.ArgumentParser(description = \"A script used to build BM25 model and relative evaluation methods.",
"string # Used to do some regex operations. import math import os #",
"1. similarity = 0.0 for term in query: if term in term_vectors and",
"bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\") rank += 1 def load_relevance_scores(): relevance_scores",
"helper function to add a new word in `query_terms`. if word not in",
"2) + idcg[-1]) # Step four: NDCG (Normalised Discounted Cumulated Gain) at N.",
"manual_mode() elif args.m == \"evaluation\": relevance_scores = load_relevance_scores() query_results = make_query_results() print_evaluation_results() if",
"as expected. # All float-point numbers like \"3.2x10\" (line 18799), \"79.5degree\" # (line",
"here. relevance_set.add(pair[0]) return relevance_set def make_retrieval_set(query_ID): # Retrieval documents (Ret). retrieval_set = set()",
"= 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times",
"\"QUIT\". \"\"\" while True: print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill",
"retrieval_set: if document_ID in relevance_set: appearance_times += 1 precision += appearance_times / len(retrieval_set)",
"into \"mit\" / \"usa\". # In the meantime, something like \"..e.g.at\" (line 17393),",
"current_section in LABELS: if current_section == ID: document_lengths[document_ID] = math.sqrt(length) # Calculate the",
"a string can be converted to an integer. Used to process documents and",
"all queries. \"\"\" p_at_n = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID)",
"score is, the more relevant the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key =",
"pair[1])) else: # For first-time running, it creates an index JSON file and",
"index JSON file is not available, just type `python3 bm25.py` to generate one",
"Sort pairs in ascending order for each query; the less the relevance #",
"this word. # There may exist a term with an ending hyphens like",
"text files. ID = \".I\" TITLE = \".T\" AUTHORS = \".A\" BIBLIOGRAPHY =",
"the first item in `dcg`. for i in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i",
"+= appearance_times / len(relevance_set) recall = recall / len(query_results) return recall def p_at_n(n):",
"print(DELIMITER_SYMBOL * BOUNDARY_LENGTH) # Print `BOUNDARY_LENGTH` `DELIMITER_SYMBOL`s to fill the default # width",
"for all queries. \"\"\" mean_average_precision = 0.0 for query_ID in relevance_scores: relevance_set =",
"for index information and the like. import string # Used to do some",
"stemming[word] = stemmer.stem(word) stemmed_word = stemming[word] if stemmed_word not in term_vectors: term_vectors[stemmed_word] =",
"{} average_length = 0.0 num_of_documents = 0 with open(DOCUMENT_PATH, \"r\") as fp: document_ID",
"word; words in `AUTHORS` # and `BIBLIOGRAPHY` section will not be counted. term_split",
"RELEVANCE_SCORE_THRESHOLD = 4 # Filter out ones with relevance score larger than `RELEVANCE_SCORE_THRESHOLD`",
"\"usa\". # In the meantime, something like \"..e.g.at\" (line 17393), # \"i.e.it\" (line",
"= make_retrieval_set(query_ID) appearance_times = 0 for document_ID in retrieval_set: if document_ID in relevance_set:",
"most top `MOST_RELEVANT` results are returned for each query. USER_STOP_WORD = \"QUIT\" #",
"vector in term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times for ID, appearance_times in vector.items()}",
"+ string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \") for character in punctuation) # Remove",
"process documents and queries. \"\"\" try: int(word) return True except ValueError: return False",
"import json # Used to create a human-readable JSON file for index information",
"Relevance Score)]}, which is exactly the same structure and length as `relevance_scores`. for",
"0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0 current_map =",
"+= process_single_query(line) query_list[query_ID] = query # Add the last entry. del query_list[0] #",
"idcg = [ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1,",
"appearance_times / pair[1] mean_average_precision += current_map / len(relevance_set) mean_average_precision = mean_average_precision / len(query_results)",
"\"f8u\" and \"f8u3\" will be saved, but not \"3\". # Calculate the last",
"p_at_n / len(query_results) return p_at_n def mean_average_precision(): \"\"\" It calculates mean average precision",
"a new word in `query_terms`. if word not in stemming: stemming[word] = stemmer.stem(word)",
"if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming =",
"query_results[query_ID] = [] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1 return",
": x[1], reverse = True) if len(similarities) > MOST_RELEVANT: return similarities[0 : MOST_RELEVANT]",
"converted into integers by just removing dots. # And similarly, phrases like \"m.",
"query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg), end = \"\\n\")",
"minuend to convert original relevance scores to # NDCG-friendly ones. # Constants used",
"and queries. \"\"\" try: int(word) return True except ValueError: return False def is_valid(word):",
"0 with open(DOCUMENT_PATH, \"r\") as fp: document_ID = 0 length = 0.0 for",
"x : x[1], reverse = True) if len(similarities) > MOST_RELEVANT: return similarities[0 :",
"query_terms: query_terms.append(stemmed_word) query_terms = [] query = query.strip() query = query.translate(removing_punctuation_map) query =",
"appearance_times / len(retrieval_set) precision = precision / len(query_results) return precision def recall(): \"\"\"",
"running, it creates an index JSON file and exit. print(\"[Generating the index file.]\")",
"in `manual` mode, the function will not end until user types \"QUIT\". \"\"\"",
"-*- coding: utf-8 -*- # Description: Build a structural data from orginial Cranfield",
"make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in retrieval_set: if document_ID",
"similarities = sorted(similarities, key = lambda x : x[1], reverse = True) if",
"query_ID, score_list in relevance_scores.items(): relevance_set = make_relevance_set(query_ID) score_list_dict = dict(score_list) # Convert a",
"Term : [Value] {[Key] Document ID : [Value] Appearance Times}}. document_lengths = {}",
"is chosen by default if it is not specified\") parser.add_argument(\"-o\", required = False,",
"the numbers followed # by \".I\", since they are not consecutive. if current_section",
"recalls for all queries. \"\"\" recall = 0.0 for query_ID in relevance_scores: relevance_set",
"if `FILE NAME` is not given, the default output file name is `evaluation_output.txt`\")",
"query = [] query_ID = 0 for line in fp: current_section = line[0",
"for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1 return query_results def make_relevance_set(query_ID):",
"import readline # Used to create a typing history buffer for `manual` mode.",
"modules used in the script. import argparse # Used to parse program arguments.",
"with relevance score larger than `RELEVANCE_SCORE_THRESHOLD` # from `QUERY_PATH`. The default value is",
"NDCGs at up to N of each query separately. \"\"\" for query_ID, score_list",
"it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter out ones with relevance score",
"!= \"\" and word not in stop_words and not is_number(word): return True else:",
": [Value] [(Document ID, Relevance Score)]}, which is exactly the same structure and",
"`query_results` structure: {[KEY] query ID : [Value] [(Document ID, Relevance Score)]}, which is",
"for each query; the less the relevance # score is, the more relevant",
"\"79.5degree\" # (line 20026) will be converted into integers by just removing dots.",
"if document_ID in relevance_set: appearance_times += 1 precision += appearance_times / len(retrieval_set) precision",
"* ((1.0 - B) + B * document_lengths[document_ID])) * idf if similarity >",
": n] else: yield query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end",
"some Python libraries that places locally. import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH =",
"N. ndcg_at_n = [] for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if",
"\"\"\" recall = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set =",
"line.replace(\"--\", \" \") # Also, treat two consecutive hyphens as a space. for",
"- n_i + 0.5) / (n_i + 0.5), 2) similarity += frequency *",
"if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no repetition of document IDs",
"if is_valid(compound): add_new_word(compound) term_split = term.split(\"-\") if len(term_split) > 1: for element in",
"a string is valid. Used to process documents and queries. \"\"\" if word",
"It returns possible relevant documents for each query based on BM25 model. \"\"\"",
"parse program arguments. # More details are here: https://docs.python.org/3/library/argparse.html import readline # Used",
"which is the numbers followed by \".I\", # since they may not be",
"\".\" and hyphenated compounds. term = term.replace(\".\", \"\") # Remove full stops in",
"build BM25 model and relative evaluation methods. If the index JSON file is",
"N. # If `MOST_RELEVANT` is equal to `N`, precision will be the same",
"of recalls for all queries. \"\"\" recall = 0.0 for query_ID in relevance_scores:",
"the dictionary after stemming. compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if section",
"length = 0.0 section = current_section continue # Update and go to next",
"# There may exist a term with an ending hyphens like # \"sub-",
"document ID begins from 001. average_length += document_lengths[document_ID] document_ID += 1 # Ignore",
"Cumulated Gain). dcg = [gain_vector[0]] # Put the first item in `dcg`. for",
"for query_ID, ndcg in ndcg_at_n(N): print(\"NDCG@{0} <Query {1}>: {2}\".format(N, query_ID, ndcg), end =",
"are some Python libraries that places locally. import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH",
"= stemming[word] if stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms = [] query =",
"for each query. yield query_ID, ndcg_at_n[0 : n] else: yield query_ID, ndcg_at_n def",
"if args.o is not None: # If `-o` option is available. with open(args.o,",
"on BM25 model. \"\"\" query_list = process_queries() query_results = {} # `query_results` structure:",
"line = line.replace(\"--\", \" \") # Also, treat two consecutive hyphens as a",
"in `term_split`, which means there is no hyphen in this word. # There",
"be saved, but not \"3\". # Calculate the last length since Cranfield collection",
"document_lengths def process_single_query(query): \"\"\" Process single line text. Used by `process_queries` function and",
"Tested under Python 3.5 on Ubuntu 16.04. # Author: '(<NAME>.) # Date created:",
"appearance_times for ID, appearance_times in vector.items()} nums_of_documents = len(document_lengths) # It is used",
"\"evaluation_output.txt\" # Labels in `cran.all.1400` and `cranqrel` text files. ID = \".I\" TITLE",
"collection and # implement the BM25 alogrithm information retrieval; # also 5 evaluation",
"(term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming = {} term_vectors = {}",
"relevance_set = set() for pair in relevance_scores[query_ID]: if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We",
": [Value] Appearance Times}}. document_lengths = {} average_length = 0.0 num_of_documents = 0",
"= [ideal_gain_vector[0]] for i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2)",
"used in Precision at N and NDCG at N. # If `MOST_RELEVANT` is",
"decides the length of the boundary between two `manual` queries. MOST_RELEVANT = 15",
"query query = [] query_ID += 1 # Ignore original query IDs, which",
"= \"A script used to build BM25 model and relative evaluation methods. If",
"= make_relevance_set(query_ID) appearance_times = 0 current_map = 0.0 for pair in query_results[query_ID]: if",
"`manual` queries. MOST_RELEVANT = 15 # At most top `MOST_RELEVANT` results are returned",
"[] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank += 1 return query_results def",
"IDs, which is the numbers followed # by \".I\", since they are not",
"35373), # both \"f8u\" and \"f8u3\" will be saved, but not \"3\". #",
"counted. term_split = term.split(\"-\") if len(term_split) > 1: # If only one item",
"# All float-point numbers like \"3.2x10\" (line 18799), \"79.5degree\" # (line 20026) will",
"fp: query_list = {} query = [] query_ID = 0 for line in",
"args.m == \"evaluation\": relevance_scores = load_relevance_scores() query_results = make_query_results() print_evaluation_results() if args.o is",
"process_single_query(query): \"\"\" Process single line text. Used by `process_queries` function and `manual` mode.",
"+ average_length) / num_of_documents for document in document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length",
"fp: for line in fp: fields = line.split() query_ID = int(fields[0]) pair =",
"in `query_terms`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word]",
"is_number(word): return True else: return False def get_arguments(): parser = argparse.ArgumentParser(description = \"A",
"2) + dcg[-1]) # Step three: IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector =",
"= int(fields[0]) pair = (int(fields[1]), int(fields[2])) if query_ID in relevance_scores: relevance_scores[query_ID].append(pair) # It",
"most top `MOST_RELEVANT` pairs (Document ID, Similarity) based on BM25 to calculate similarities.",
"p_at_n = p_at_n / len(query_results) return p_at_n def mean_average_precision(): \"\"\" It calculates mean",
"symbols. document_lengths[document_ID] = math.sqrt(length) # Skip the document with index 0 from document",
"Gain). dcg = [gain_vector[0]] # Put the first item in `dcg`. for i",
"created # and makes term_split look like [\"sub\", \"\"]. for element in term_split:",
"in term_split: # Deal with each part of compound words like \"two-step\" (line",
"if len(similarities) > MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else: return similarities def manual_mode():",
"When in `manual` mode, the function will not end until user types \"QUIT\".",
"document_lengths.items()} for term, vector in term_vectors.items(): term_vectors[term] = {int(ID) : appearance_times for ID,",
"fp: for query_ID, pair_list in query_results.items(): for pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID,",
"= 1.0 B = 0.75 # A constant used in Precision at N",
"whose relevance scores are less than or equal # to `RELEVANCE_SCORE_THRESHOLD` here. relevance_set.add(pair[0])",
"relevance scores to # NDCG-friendly ones. # Constants used in BM25 model. K",
"user types `USER_STOP_WORD`, the program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 #",
"numbers followed by \".I\", # since they may not be consecutive. num_of_documents +=",
"\"*\" BOUNDARY_LENGTH = 80 # It decides the length of the boundary between",
"\"\") # Remove full stops in one term, used to convert abbreviations #",
"ranking scores to NDCG-usable scores. else: gain_vector.append(0) # Step two: DCG (Discounted Cumulated",
"for \"f8u-3\" (line 35373), # both \"f8u\" and \"f8u3\" will be saved, but",
"recall += appearance_times / len(relevance_set) recall = recall / len(query_results) return recall def",
"# For first-time running, it creates an index JSON file and exit. print(\"[Generating",
"\"\"\" It returns possible relevant documents for each query based on BM25 model.",
"more relevant the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key = lambda x :",
"+= 1.0 # Treat a compound word as one word; words in `AUTHORS`",
"start a new one. # The empty entry for document 0 is also",
"is_valid(element): add_new_word(element) return query_terms def process_queries(): with open(QUERY_PATH, \"r\") as fp: query_list =",
"in relevance_set: appearance_times += 1 current_map += appearance_times / pair[1] mean_average_precision += current_map",
"# score is, the more relevant the document is. relevance_scores[query_ID] = sorted(relevance_scores[query_ID], key",
"2, 3, 4), which means all # documents in it will be reserved.",
"query_terms.append(stemmed_word) query_terms = [] query = query.strip() query = query.translate(removing_punctuation_map) query = query.replace(\"--\",",
"IDCG (Ideal Discounted Cumulated Gain). ideal_gain_vector = [] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX",
"by \".I\", # since they may not be consecutive. num_of_documents += 1 length",
"return mean_average_precision def ndcg_at_n(n): \"\"\" It yields a list of NDCGs at up",
"terminal window. user_query = input(\"Enter query (type \\\"QUIT\\\" to terminate): \") if user_query",
"At most top `MOST_RELEVANT` results are returned for each query. USER_STOP_WORD = \"QUIT\"",
"= 0.0 for line in fp: current_section = line[0 : 2] if current_section",
"if stemmed_word not in query_terms: query_terms.append(stemmed_word) query_terms = [] query = query.strip() query",
"= \".B\" WORDS = \".W\" LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS",
"file.]\") with open(INDEX_PATH, \"r\") as fp: stemming, term_vectors, document_lengths = json.load(fp) # Warning:",
"The empty entry for document 0 is also created although # in Cranfield",
"0.0 num_of_documents = 0 with open(DOCUMENT_PATH, \"r\") as fp: document_ID = 0 length",
"= (document_ID, similarity) similarities.append(pair) # Sort results in desceding order. similarities = sorted(similarities,",
"It decides the length of the boundary between two `manual` queries. MOST_RELEVANT =",
"= 15 # At most top `MOST_RELEVANT` results are returned for each query.",
"# Tested under Python 3.5 on Ubuntu 16.04. # Author: '(<NAME>.) # Date",
"for each document. return stemming, term_vectors, document_lengths def process_single_query(query): \"\"\" Process single line",
"in term_vectors: term_vectors[stemmed_word] = {} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else:",
"relevance_set and pair[1] <= n: appearance_times += 1 p_at_n += appearance_times / n",
"# both \"f8u\" and \"f8u3\" will be saved, but not \"3\". # Calculate",
"return recall def p_at_n(n): \"\"\" It calculates arithmetic mean of precisions at N",
"= {int(ID) : length for ID, length in document_lengths.items()} for term, vector in",
"term_vectors: term_vectors[stemmed_word] = {} if document_ID in term_vectors[stemmed_word]: (term_vectors[stemmed_word])[document_ID] += 1 else: term_vectors[stemmed_word].update({document_ID",
"If `MOST_RELEVANT` is equal to `N`, precision will be the same as P",
"the length of the boundary between two `manual` queries. MOST_RELEVANT = 15 #",
"to add a new word in `term_vectors`. if word not in stemming: stemming[word]",
"to `N`, precision will be the same as P at N for Cranfield",
"query_ID, ndcg), end = \"\\n\") if __name__ == \"__main__\": stemmer = porter.PorterStemmer() stop_words",
"0 length = 0.0 for line in fp: current_section = line[0 : 2]",
"or \"studies.dash\" (line 516) will not be handled as expected. # All float-point",
"/ len(relevance_set) mean_average_precision = mean_average_precision / len(query_results) return mean_average_precision def ndcg_at_n(n): \"\"\" It",
"available. with open(args.o, \"w\") as fp: for query_ID, pair_list in query_results.items(): for pair",
"calculates arithmetic mean of precisions at N for all queries. \"\"\" p_at_n =",
"ID, appearance_times in vector.items()} nums_of_documents = len(document_lengths) # It is used in `bm25_similarities()`",
"return True except ValueError: return False def is_valid(word): \"\"\" A helper function to",
"= \"\\n\") print(\"Mean Average Precision: {0}\".format(mean_average_precision()), end = \"\\n\") for query_ID, ndcg in",
"types `USER_STOP_WORD`, the program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter",
"query_list def bm25_similarities(query): \"\"\" It returns a descending list with at most top",
"item in `dcg`. for i in range(1, len(gain_vector)): dcg.append(gain_vector[i] / math.log(i + 1,",
"in relevance_scores: relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID",
"\".T\" AUTHORS = \".A\" BIBLIOGRAPHY = \".B\" WORDS = \".W\" LABELS = [ID,",
"will be ignored. # \"r.m.s.\" (line 20241) will become \"rm\" stored in the",
"1 else: term_vectors[stemmed_word].update({document_ID : 1}) stemming = {} term_vectors = {} # `term_vectors`",
"get_arguments(): parser = argparse.ArgumentParser(description = \"A script used to build BM25 model and",
"for `manual` mode. # More details are here: https://docs.python.org/3/library/readline.html import json # Used",
"relevance_set = make_relevance_set(query_ID) retrieval_set = make_retrieval_set(query_ID) appearance_times = 0 for document_ID in retrieval_set:",
"elif section in CONTENTS: line = line.translate(removing_punctuation_map) line = line.replace(\"--\", \" \") #",
"be ignored in this case\") parser.add_argument(\"-m\", required = False, choices = [\"manual\", \"evaluation\"],",
"= 0.0 for pair in query_results[query_ID]: if pair[0] in relevance_set: appearance_times += 1",
"fill the default # width of terminal window. user_query = input(\"Enter query (type",
"= \".I\" TITLE = \".T\" AUTHORS = \".A\" BIBLIOGRAPHY = \".B\" WORDS =",
"nargs = \"?\", const = EVALUATION_PATH, metavar = \"FILE NAME\", help = \"BM25",
"if it is not specified\") parser.add_argument(\"-o\", required = False, nargs = \"?\", const",
"Used to process documents and queries. \"\"\" try: int(word) return True except ValueError:",
"Score)]}, which is exactly the same structure and length as `relevance_scores`. for query_ID",
"# implement the BM25 alogrithm information retrieval; # also 5 evaluation methods (precision,",
"query += process_single_query(line) query_list[query_ID] = query # Add the last entry. del query_list[0]",
"the last length since Cranfield collection does not have ending symbols. document_lengths[document_ID] =",
"MOST_RELEVANT: return similarities[0 : MOST_RELEVANT] else: return similarities def manual_mode(): \"\"\" When in",
"# \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\" (line 516) will",
"just removing dots. # And similarly, phrases like \"m. i. t.\" (line 36527)",
"Discounted Cumulated Gain) at N. ndcg_at_n = [] for pair in zip(dcg, idcg):",
"CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80 # It",
"# Here are some Python libraries that places locally. import porter STOP_WORDS_PATH =",
"open(args.o, \"w\") as fp: for query_ID, pair_list in query_results.items(): for pair in pair_list:",
"Score)]} with open(RELEVANCE_PATH, \"r\") as fp: for line in fp: fields = line.split()",
"t.\" (line 36527) and # \"i. e.\" (line 11820) will be ignored. #",
"string.punctuation[14:] removing_punctuation_map = dict((ord(character), \" \") for character in punctuation) # Remove all",
"helper function to check if a string is valid. Used to process documents",
"locally. import porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH",
"Convert original ranking scores to NDCG-usable scores. else: gain_vector.append(0) # Step two: DCG",
"JSON file and exit. print(\"[Generating the index file.]\") with open(INDEX_PATH, \"w\") as fp:",
"\"?\", const = EVALUATION_PATH, metavar = \"FILE NAME\", help = \"BM25 evaluation result",
"stemming. compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound) if section == WORDS: length",
"{[Key] Document ID : [Value] Appearance Times}}. document_lengths = {} average_length = 0.0",
"= math.sqrt(length) # Skip the document with index 0 from document length vector.",
"# Calculate the previous document length and start a new one. # The",
"= False, nargs = \"?\", const = EVALUATION_PATH, metavar = \"FILE NAME\", help",
"if pair[1] <= RELEVANCE_SCORE_THRESHOLD: # We only include queries whose relevance scores are",
"# Convert a list of pairs to dictionary for convienence. # Step one:",
"for convienence. # Step one: gain vector. gain_vector = [] for pair in",
"possible relevant documents for each query based on BM25 model. \"\"\" query_list =",
"to whitespace characters and deal with two special cases: # abbreviations with \".\"",
"stemming, term_vectors, document_lengths def process_single_query(query): \"\"\" Process single line text. Used by `process_queries`",
": 2] if current_section in LABELS: if current_section == ID: query_list[query_ID] = query",
"query = [] query_ID += 1 # Ignore original query IDs, which is",
"{1}>: {2}\".format(N, query_ID, ndcg), end = \"\\n\") if __name__ == \"__main__\": stemmer =",
"term_split: # Deal with each part of compound words like \"two-step\" (line 38037)",
"are some Python standard modules used in the script. import argparse # Used",
"dictionary containing pairs of original words and stemmed words are returned. \"\"\" def",
"cases: # abbreviations with \".\" and hyphenated compounds. term = term.replace(\".\", \"\") #",
"`process_queries` function and `manual` mode. \"\"\" def add_new_word(word): # A helper function to",
"query ID : [Value] [(Document ID, Relevance Score)]}, which is exactly the same",
"make_relevance_set(query_ID): # Relevant documents (Rel). relevance_set = set() for pair in relevance_scores[query_ID]: if",
"function to add a new word in `term_vectors`. if word not in stemming:",
"length since Cranfield collection does not have ending symbols. document_lengths[document_ID] = math.sqrt(length) #",
"+ idcg[-1]) # Step four: NDCG (Normalised Discounted Cumulated Gain) at N. ndcg_at_n",
"fp: current_section = line[0 : 2] if current_section in LABELS: if current_section ==",
"first one. return query_list def bm25_similarities(query): \"\"\" It returns a descending list with",
"= {int(ID) : appearance_times for ID, appearance_times in vector.items()} nums_of_documents = len(document_lengths) #",
"# \"sub- and\" (line 14632), which causes an extra empty string is created",
"K) / (frequency + K * ((1.0 - B) + B * document_lengths[document_ID]))",
"are returned. \"\"\" def add_new_word(word): # A helper function to add a new",
"porter STOP_WORDS_PATH = \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\"",
"\"evaluation\": relevance_scores = load_relevance_scores() query_results = make_query_results() print_evaluation_results() if args.o is not None:",
"current_section == WORDS: section = current_section continue elif section in CONTENTS: if query",
"score_list_dict[pair[0]]) # Convert original ranking scores to NDCG-usable scores. else: gain_vector.append(0) # Step",
"= 1 query_results[query_ID] = [] for pair in bm25_similarities(query_list[query_ID]): query_results[query_ID].append((pair[0], rank)) rank +=",
"new word in `query_terms`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word",
"= \"\\n\") print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\")",
"model and relative evaluation methods. If the index JSON file is not available,",
"1 p_at_n += appearance_times / n p_at_n = p_at_n / len(query_results) return p_at_n",
"open(INDEX_PATH, \"r\") as fp: stemming, term_vectors, document_lengths = json.load(fp) # Warning: unlike Python,",
"and length as `relevance_scores`. for query_ID in query_list: rank = 1 query_results[query_ID] =",
"QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH = \"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" #",
"\"\"\" It calculates arithmetic mean of recalls for all queries. \"\"\" recall =",
"make_relevance_set(query_ID) appearance_times = 0 current_map = 0.0 for pair in query_results[query_ID]: if pair[0]",
"in `term_vectors`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word = stemming[word]",
"query_ID, ndcg_at_n def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()),",
"\"rm\" stored in the dictionary after stemming. compound = term.replace(\"-\", \"\") if is_valid(compound):",
"queries. \"\"\" p_at_n = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times",
"pair_list in query_results.items(): for pair in pair_list: fp.write(\"{0} {1} {2}\\n\".format(query_ID, pair[0], pair[1])) else:",
"a human-readable JSON file for index information and the like. import string #",
"def print_evaluation_results(): print(\"Evaluation Results:\") print(\"Precision: {0}\".format(precision()), end = \"\\n\") print(\"Recall: {0}\".format(recall()), end =",
"similarly, phrases like \"m. i. t.\" (line 36527) and # \"i. e.\" (line",
"N of each query separately. \"\"\" for query_ID, score_list in relevance_scores.items(): relevance_set =",
"for document in document_lengths.keys(): document_lengths[document] = document_lengths[document] / average_length # Now document_lengths stores",
"and NDCG at N. # If `MOST_RELEVANT` is equal to `N`, precision will",
"stemmer = porter.PorterStemmer() stop_words = load_stop_words() punctuation = string.punctuation[0 : 12] + string.punctuation[14:]",
"= \".A\" BIBLIOGRAPHY = \".B\" WORDS = \".W\" LABELS = [ID, TITLE, AUTHORS,",
"query (type \\\"QUIT\\\" to terminate): \") if user_query == USER_STOP_WORD: break query_terms =",
"Filter out all pure integers; for example, for \"f8u-3\" (line 35373), # both",
"\".W\" LABELS = [ID, TITLE, AUTHORS, BIBLIOGRAPHY, WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS]",
"`manual` mode. \"\"\" def add_new_word(word): # A helper function to add a new",
"+ 1, 2) + dcg[-1]) # Step three: IDCG (Ideal Discounted Cumulated Gain).",
"Here are some Python standard modules used in the script. import argparse #",
"only one item in `term_split`, which means there is no hyphen in this",
"is of necessity. document_lengths = {int(ID) : length for ID, length in document_lengths.items()}",
"value is 4 (-1, 1, 2, 3, 4), which means all # documents",
"section = current_section continue # Update and go to next line immediately. elif",
"pair = (document_ID, similarity) similarities.append(pair) # Sort results in desceding order. similarities =",
"metavar = \"FILE NAME\", help = \"BM25 evaluation result output in lines of",
"python3 # -*- coding: utf-8 -*- # Description: Build a structural data from",
"is exactly the same structure and length as `relevance_scores`. for query_ID in query_list:",
"import argparse # Used to parse program arguments. # More details are here:",
"precision will be the same as P at N for Cranfield collection. #",
"create a typing history buffer for `manual` mode. # More details are here:",
"default = \"manual\", help = \"mode selection; `manual` mode is chosen by default",
"consecutive hyphens as a space. for term in line.split(): # Split according to",
"It is a number used as minuend to convert original relevance scores to",
"B = 0.75 # A constant used in Precision at N and NDCG",
"be larger than `MOST_RELEVANT`. N = 10 def is_number(word): \"\"\" A helper function",
"pair[0] in relevance_set: appearance_times += 1 current_map += appearance_times / pair[1] mean_average_precision +=",
"print(\"Recall: {0}\".format(recall()), end = \"\\n\") print(\"P@{0}: {1}\".format(N, p_at_n(N)), end = \"\\n\") print(\"Mean Average",
"in relevance_scores: relevance_scores[query_ID].append(pair) # It assumes no repetition of document IDs for each",
"return stemming, term_vectors, document_lengths def process_single_query(query): \"\"\" Process single line text. Used by",
"like \"two-step\" (line 38037) or # type names like \"75s-t6\" (line 28459) or",
"= 0 for pair in query_results[query_ID]: if pair[0] in relevance_set and pair[1] <=",
"i in range(1, len(ideal_gain_vector)): idcg.append(ideal_gain_vector[i] / math.log(i + 1, 2) + idcg[-1]) #",
"an ending hyphens like # \"sub- and\" (line 14632), which causes an extra",
"= \"stopwords.txt\" DOCUMENT_PATH = \"./cran/cran.all.1400\" QUERY_PATH = \"./cran/cran.qry\" RELEVANCE_PATH = \"./cran/cranqrel\" INDEX_PATH =",
"to next line immediately. elif section in CONTENTS: line = line.translate(removing_punctuation_map) line =",
"ID begins from 1. similarity = 0.0 for term in query: if term",
"# It decides the length of the boundary between two `manual` queries. MOST_RELEVANT",
"\"index.json\" EVALUATION_PATH = \"evaluation_output.txt\" # Labels in `cran.all.1400` and `cranqrel` text files. ID",
"Skip the first one. return query_list def bm25_similarities(query): \"\"\" It returns a descending",
"idcg[-1]) # Step four: NDCG (Normalised Discounted Cumulated Gain) at N. ndcg_at_n =",
"makes term_split look like [\"sub\", \"\"]. for element in term_split: # Deal with",
"under Python 3.5 on Ubuntu 16.04. # Author: '(<NAME>.) # Date created: 2018-05-07",
"/ \"u.s.a.\" (line 32542) into \"mit\" / \"usa\". # In the meantime, something",
"arguments. # More details are here: https://docs.python.org/3/library/argparse.html import readline # Used to create",
"compound words like \"two-step\" (line 38037) or # type names like \"75s-t6\" (line",
"pair[0] in relevance_set and pair[1] <= n: appearance_times += 1 p_at_n += appearance_times",
"[] for pair in zip(dcg, idcg): ndcg_at_n.append(pair[0] / pair[1]) if len(ndcg_at_n) > n:",
"one. return query_list def bm25_similarities(query): \"\"\" It returns a descending list with at",
"+ K * ((1.0 - B) + B * document_lengths[document_ID])) * idf if",
"stop_words def process_documents(): \"\"\" Build vectors of each term and calculate lengths of",
"K = 1.0 B = 0.75 # A constant used in Precision at",
"`N`, precision will be the same as P at N for Cranfield collection.",
"False, nargs = \"?\", const = EVALUATION_PATH, metavar = \"FILE NAME\", help =",
"order. similarities = sorted(similarities, key = lambda x : x[1], reverse = True)",
"to process documents and queries. \"\"\" if word != \"\" and word not",
"stored in the dictionary after stemming. compound = term.replace(\"-\", \"\") if is_valid(compound): add_new_word(compound)",
"to convert original relevance scores to # NDCG-friendly ones. # Constants used in",
"to # NDCG-friendly ones. # Constants used in BM25 model. K = 1.0",
"if pair[0] in relevance_set: gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) # Convert original ranking scores to",
"Appearance Times}}. document_lengths = {} average_length = 0.0 num_of_documents = 0 with open(DOCUMENT_PATH,",
"created although # in Cranfield collection, document ID begins from 001. average_length +=",
"# Filter out all pure integers; for example, for \"f8u-3\" (line 35373), #",
"in ascending order for each query; the less the relevance # score is,",
"relevant documents for each query based on BM25 model. \"\"\" query_list = process_queries()",
"WORDS] CONTENTS = [AUTHORS, BIBLIOGRAPHY, WORDS] DELIMITER_SYMBOL = \"*\" BOUNDARY_LENGTH = 80 #",
"dcg.append(gain_vector[i] / math.log(i + 1, 2) + dcg[-1]) # Step three: IDCG (Ideal",
"open(DOCUMENT_PATH, \"r\") as fp: document_ID = 0 length = 0.0 for line in",
"= input(\"Enter query (type \\\"QUIT\\\" to terminate): \") if user_query == USER_STOP_WORD: break",
"and hyphenated compounds. term = term.replace(\".\", \"\") # Remove full stops in one",
"mode, the function will not end until user types \"QUIT\". \"\"\" while True:",
"\"sub- and\" (line 14632), which causes an extra empty string is created #",
"documents and queries. \"\"\" if word != \"\" and word not in stop_words",
"function to check if a string can be converted to an integer. Used",
"# Retrieval documents (Ret). retrieval_set = set() for pair in query_results[query_ID]: retrieval_set.add(pair[0]) return",
"utf-8 -*- # Description: Build a structural data from orginial Cranfield collection and",
"all # documents in it will be reserved. RELEVANCE_SCORE_FIX = 5 # It",
"mean_average_precision = 0.0 for query_ID in relevance_scores: relevance_set = make_relevance_set(query_ID) appearance_times = 0",
"RELEVANCE_SCORE_FIX = 5 # It is a number used as minuend to convert",
"dictionary for convienence. # Step one: gain vector. gain_vector = [] for pair",
"> n: # And finally, yield at most `n` results for each query.",
"14632), which causes an extra empty string is created # and makes term_split",
"at N and # NDCG at N) are applied. # Tested under Python",
"not given, the default output file name is `evaluation_output.txt`\") return parser.parse_args() def load_stop_words():",
"a typing history buffer for `manual` mode. # More details are here: https://docs.python.org/3/library/readline.html",
"Gain). ideal_gain_vector = [] for pair in score_list: ideal_gain_vector.append(RELEVANCE_SCORE_FIX - score_list_dict[pair[0]]) idcg =",
"go to next line immediately. elif section in CONTENTS: line = line.translate(removing_punctuation_map) line",
"stop_words = set() with open(STOP_WORDS_PATH, \"r\") as fp: for line in fp: stop_words.add(line.rstrip())",
"new word in `term_vectors`. if word not in stemming: stemming[word] = stemmer.stem(word) stemmed_word",
"at most top `MOST_RELEVANT` pairs (Document ID, Similarity) based on BM25 to calculate",
"`USER_STOP_WORD`, the program ends; it is case-sensitive. RELEVANCE_SCORE_THRESHOLD = 4 # Filter out",
"length of the boundary between two `manual` queries. MOST_RELEVANT = 15 # At",
"17393), # \"i.e.it\" (line 17287), \"trans.amer.math.soc.33\" (line 31509), # or \"studies.dash\" (line 516)",
"and hyphens. args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from file.]\") with",
"in lines of 3-tuples (query ID, document ID, and its rank [1 -",
"recall def p_at_n(n): \"\"\" It calculates arithmetic mean of precisions at N for",
"two `manual` queries. MOST_RELEVANT = 15 # At most top `MOST_RELEVANT` results are",
"for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])), end = \"\\n\") rank += 1",
"+ str(query_terms)) print(\"Rank\\tID\\tScore\") rank = 1 for result in bm25_similarities(query_terms): print(\"{0}\\t{1}\\t{2}\".format(str(rank), result[0], str(result[1])),",
"= 5 # It is a number used as minuend to convert original",
"full stops and hyphens. args = get_arguments() if os.path.exists(INDEX_PATH): print(\"[Loading BM25 index from",
"B) + B * document_lengths[document_ID])) * idf if similarity > 0.0: # Ignore",
"each term and calculate lengths of each documents. Also a dictionary containing pairs",
"document with index 0 from document length vector. del document_lengths[0] average_length = (document_lengths[document_ID]",
"= stemming[word] if stemmed_word not in term_vectors: term_vectors[stemmed_word] = {} if document_ID in"
] |
[
"model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\", \"empty\"), ],",
"\"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\", \"empty\"), ], default=\"empty\", max_length=32, ), ) ]",
"field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\", \"empty\"), ], default=\"empty\", max_length=32,",
"utf-8 -*- # Generated by Django 1.11.16 on 2018-11-14 19:30 from __future__ import",
"class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField(",
"Django 1.11.16 on 2018-11-14 19:30 from __future__ import unicode_literals from django.db import migrations,",
"\"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"),",
"19:30 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies",
"unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations",
"Generated by Django 1.11.16 on 2018-11-14 19:30 from __future__ import unicode_literals from django.db",
"migrations, models class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\",",
"choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\", \"empty\"), ], default=\"empty\", max_length=32, ),",
"django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations = [",
"import migrations, models class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField(",
"by Django 1.11.16 on 2018-11-14 19:30 from __future__ import unicode_literals from django.db import",
"models class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\",",
"on 2018-11-14 19:30 from __future__ import unicode_literals from django.db import migrations, models class",
"dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\",",
"[ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\",",
"= [(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"),",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations =",
"-*- coding: utf-8 -*- # Generated by Django 1.11.16 on 2018-11-14 19:30 from",
"operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\",",
"migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\", \"empty\"),",
"import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")]",
"1.11.16 on 2018-11-14 19:30 from __future__ import unicode_literals from django.db import migrations, models",
"[(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\",",
"name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\", \"empty\"), ], default=\"empty\",",
"# -*- coding: utf-8 -*- # Generated by Django 1.11.16 on 2018-11-14 19:30",
"coding: utf-8 -*- # Generated by Django 1.11.16 on 2018-11-14 19:30 from __future__",
"= [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[ (\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"),",
"from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies =",
"2018-11-14 19:30 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration):",
"Migration(migrations.Migration): dependencies = [(\"variants\", \"0012_auto_20181114_1914\")] operations = [ migrations.AddField( model_name=\"smallvariantflags\", name=\"flag_summary\", field=models.CharField( choices=[",
"(\"positive\", \"positive\"), (\"uncertain\", \"uncertain\"), (\"negative\", \"negative\"), (\"empty\", \"empty\"), ], default=\"empty\", max_length=32, ), )",
"__future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"variants\",",
"-*- # Generated by Django 1.11.16 on 2018-11-14 19:30 from __future__ import unicode_literals",
"# Generated by Django 1.11.16 on 2018-11-14 19:30 from __future__ import unicode_literals from"
] |
from itertools import chain

from .common import EWSAccountService, create_attachment_ids_element
from ..util import create_element, add_xml_child, set_xml_value, DummyResponse, StreamingBase64Parser,\
    StreamingContentHandler, ElementNotFound, MNS

# https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/bodytype
BODY_TYPE_CHOICES = ('Best', 'HTML', 'Text')


class GetAttachment(EWSAccountService):
    """MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getattachment-operation"""

    SERVICE_NAME = 'GetAttachment'
    element_container_name = '{%s}Attachments' % MNS

    def call(self, items, include_mime_content, body_type, filter_html_content, additional_fields):
        if body_type and body_type not in BODY_TYPE_CHOICES:
            raise ValueError("'body_type' %s must be one of %s" % (body_type, BODY_TYPE_CHOICES))
        return self._elems_to_objs(self._chunked_get_elements(
            self.get_payload,
            items=items,
            include_mime_content=include_mime_content,
            body_type=body_type,
            filter_html_content=filter_html_content,
            additional_fields=additional_fields,
        ))

    def _elems_to_objs(self, elems):
        from ..attachments import FileAttachment, ItemAttachment
        cls_map = {cls.response_tag(): cls for cls in (FileAttachment, ItemAttachment)}
        for elem in elems:
            if isinstance(elem, Exception):
                yield elem
                continue
            yield cls_map[elem.tag].from_xml(elem=elem, account=self.account)

    def get_payload(self, items, include_mime_content, body_type, filter_html_content, additional_fields):
        payload = create_element('m:%s' % self.SERVICE_NAME)
        shape_elem = create_element('m:AttachmentShape')
        if include_mime_content:
            add_xml_child(shape_elem, 't:IncludeMimeContent', 'true')
        if body_type:
            add_xml_child(shape_elem, 't:BodyType', body_type)
        if filter_html_content is not None:
            add_xml_child(shape_elem, 't:FilterHtmlContent', 'true' if filter_html_content else 'false')
        if additional_fields:
            additional_properties = create_element('t:AdditionalProperties')
            expanded_fields = chain(*(f.expand(version=self.account.version) for f in additional_fields))
            set_xml_value(additional_properties, sorted(
                expanded_fields,
                key=lambda f: (getattr(f.field, 'field_uri', ''), f.path)
            ), version=self.account.version)
            shape_elem.append(additional_properties)
        if len(shape_elem):
            payload.append(shape_elem)
        attachment_ids = create_attachment_ids_element(items=items, version=self.account.version)
        payload.append(attachment_ids)
        return payload

    def _update_api_version(self, api_version, header, **parse_opts):
        if not parse_opts.get('stream_file_content', False):
            super()._update_api_version(api_version, header, **parse_opts)
        # TODO: We're skipping this part in streaming mode because StreamingBase64Parser cannot parse the SOAP header

    @classmethod
    def _get_soap_parts(cls, response, **parse_opts):
        if not parse_opts.get('stream_file_content', False):
            return super()._get_soap_parts(response, **parse_opts)
        # Pass the response unaltered. We want to use our custom streaming parser
        return None, response

    def _get_soap_messages(self, body, **parse_opts):
        if not parse_opts.get('stream_file_content', False):
            return super()._get_soap_messages(body, **parse_opts)
        from ..attachments import FileAttachment
        # 'body' is actually the raw response passed on by '_get_soap_parts'
        r = body
        parser = StreamingBase64Parser()
        field = FileAttachment.get_field_by_fieldname('_content')
        handler = StreamingContentHandler(parser=parser, ns=field.namespace, element_name=field.field_uri)
        parser.setContentHandler(handler)
        return parser.parse(r)

    def stream_file_content(self, attachment_id):
        # The streaming XML parser can only stream content of one attachment
        payload = self.get_payload(
            items=[attachment_id],
            include_mime_content=False,
            body_type=None,
            filter_html_content=None,
            additional_fields=None,
        )
        self.streaming = True
        try:
            yield from self._get_response_xml(payload=payload, stream_file_content=True)
        except ElementNotFound as enf:
            # When the returned XML does not contain a Content element, ElementNotFound is thrown by parser.parse().
            # Let the non-streaming SOAP parser parse the response and hook into the normal exception handling.
            # Wrap in DummyResponse because _get_soap_parts() expects an iter_content() method.
            response = DummyResponse(url=None, headers=None, request_headers=None, content=enf.data)
            _, body = super()._get_soap_parts(response=response)
            res = super()._get_soap_messages(body=body)
            for e in self._get_elements_in_response(response=res):
                if isinstance(e, Exception):
                    raise e
            # The returned content did not contain any EWS exceptions. Give up and re-raise the original exception.
            raise enf
        finally:
            self.streaming = False
            self.stop_streaming()
[
"the sample rate of the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of",
"compile the integrator to run on an AMD ROCm compatible GPU, in parallel.",
"dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((2, 2), dtype =",
"\"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on",
": :obj:`int` A reference number, used when compiling the integrator, where higher level",
"= np.complex128) elif device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype =",
"jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func):",
"= inner self.set_to = set_to self.set_to_one = set_to_one self.set_to_zero = set_to_zero self.matrix_multiply =",
"left[2, 2]*right[2, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1]",
"- i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X",
"math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration",
"result[2, 2] = conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def",
"# matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) else: @jit_device def norm2(z): return",
"to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The",
"@jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0, 0])*operator[0, 0] +",
"1] = -(y + 1j*x)*s result[1, 1] = c + 1j*z*s else: result[0,",
"temporary = temporary_group[roc.get_local_id(1), :, :] # for power_index in range(hyper_cube_amount): # matrix_multiply(result, result,",
"def jit_device_template(func): return func return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu_single\":",
"be run. For example, it is used to sweep over dressing frequencies during",
"math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0]",
":ref:`architecture` for some information. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch",
"in parallel. For spin half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\",
"See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The expected",
".. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable` A python function that",
"0 operator[2, 2] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1,",
"+ left[2, 2]*right[2, 1] result[0, 2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1,",
"0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] = sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq))",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to. set_to_one(operator) :",
"0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] +",
"result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] +",
"result) self.conj = conj self.complex_abs = complex_abs self.norm2 = norm2 self.inner = inner",
"Ca/ez - 1 result[1, 0] = Sa*ep result[0, 1] = Sa/ep result[1, 1]",
"ib)^* &= a - ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z**",
"time evolution operator is found for. In units of s. This is an",
"= cuda.local.array((dimension, dimension), dtype = np.complex128) elif device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block,",
"== IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max",
"\\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes the exponent is an imaginary linear combination",
"s. * **simulation_index** (:obj:`int`) - a parameter that can be swept over when",
". import utilities from enum import Enum import numpy as np import numba",
"\"\"\" The target device that the integrator is being compiled for. .. _Supported",
"+ y J_y + z J_z), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &=",
"use this device option, the user defined field function must be :func:`numba.cuda.jit()` compilable.",
"if hyper_cube_amount < 0: hyper_cube_amount = 0 precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0]",
"== 2: # temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) # temporary",
"in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The expected value for",
"= spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif",
"Parameters ---------- get_field : :obj:`callable` A python function that describes the field that",
"matrix into another. .. math:: (A)_{i, j} = (B)_{i, j} Parameters: * **operator**",
":obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation using the Lie",
"self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda",
"result[2, 2] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount",
"is set to :obj:`True` - no such approximations are made, and the output",
"field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0,",
"of the spin system over time. Parameters ---------- sweep_parameter : :obj:`float` The input",
"if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)",
"2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2]",
"Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(3)`,",
"jit_device = device.jit_device device_index = device.index @jit_device def conj(z): return (z.real - 1j*z.imag)",
"matrix which the result of the exponentiation is to be written to. *",
"\"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index = index if",
"= set_to_one self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic =",
"1])*operator[1, 0] + operator[1, 2]*operator[2, 0] result[2, 0] = operator[2, 0]*operator[0, 0] +",
"self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device = jit_device def jit_device_template(template): def",
"time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2]",
"exponent is an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A",
"\\\\\\\\ i & 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0",
"of :class:`numpy.float64` (start time (0) or end time (1)) The time values for",
"the time to sample the field at, in units of s. * **simulation_index**",
"get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\")",
"at, and the time that the experiment is to finish at. Measured in",
"float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])",
"np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype",
"dtype = np.complex128) elif device_index == 1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype =",
"to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. exponentiation_method :",
"features. \"\"\" ROC = (\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()` LLVM compiler to",
"J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z)",
"jit_device(func): return nb.njit()(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return",
"spin one :obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device` The option to select which",
"\\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y -",
"= math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for the spin quantum number of a",
"(:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated time evolution operator between",
"attr_name)) class Simulator: \"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select",
"be written to. * **trotter_cutoff** (:obj:`int`) - The number of squares to make",
"in parallel time_index = cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration,",
"1] = 0 operator[0, 2] = 0 operator[1, 2] = 0 operator[2, 2]",
"* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array to write the",
"level objects like enums cannot be interpreted. \"\"\" def __init__(self, value, index): super().__init__()",
"expected spin value in parallel. For spin half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t)",
"\\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} &",
"field_sample[3]/(6*precision) # eq = math.cos(eq) + 1j*math.sin(eq) result[0, 0] = Ca/ez - 1",
"each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to",
"wavefunction) of the system at the start of the simulation. state : :class:`numpy.ndarray`",
"time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample)",
"& 0 \\\\\\\\ 0 & -2 & 0 \\\\\\\\ 0 & 0 &",
"result, temporary) # matrix_multiply(temporary, temporary, result) result[0, 0] += 1 result[1, 1] +=",
"J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y",
"output timeseries for the state. Must be a whole number multiple of `time_step_integration`.",
"pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0:",
"temporary) # matrix_multiply(temporary, temporary, result) self.conj = conj self.complex_abs = complex_abs self.norm2 =",
"internal function for evaluating the time evolution operator in parallel. Compiled for chosen",
"systems:** Assumes the exponent is an imaginary linear combination of a subspace of",
"c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}}",
"transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] +",
"jit_host self.jit_host = jit_host def jit_device(func): return roc.jit(device = True)(func) self.jit_device = jit_device",
"the spin projection operator in the z direction. Returns ------- results : :obj:`Results`",
"exponentiation_method_index = exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting",
"spin_quantum_number self.device = device self.get_time_evolution_raw = None self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number,",
"- state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif device_index > 0: if device_index ==",
"`state` was evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time",
"\\\\cdot r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i (l_i)^*",
"self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin raise AttributeError(\"{} has no attribute called {}.\".format(self,",
"experiment is to finish at. Measured in s. * **time_step_integration** (:obj:`float`) - The",
"append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64,",
"jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\")",
"option to select which device will be targeted for integration. That is, whether",
"matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) else: @jit_device def norm2(z): return math.sqrt(z[0].real**2",
"rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64,",
"z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device def cross(left, right, result): result[0] = conj(left[1]*right[2]",
"math.cos(r/2) s = math.sin(r/2) result[0, 0] = c - 1j*z*s result[1, 0] =",
"= state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 return",
"The inner product of l and r. set_to(operator, result) : :obj:`callable` Copy the",
"time = np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points,",
"time series of a quantum state. This :obj:`callable` is passed to the :obj:`Results`",
"= 0.5*cisz*(cx + cy + 1j*sx*sy) # if device_index == 0: # temporary",
"jit_device_template(func): return func return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu_single\": def",
"= cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block",
"Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details. trotter_cutoff : :obj:`int` The number",
"set to :obj:`True`, the integrator moves into a frame rotating in the z",
"time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the stepwise time evolution opperator. Parameters ---------- sweep_parameter",
"returned value of the field. This is a four dimensional vector, with the",
"the eigenstates of the spin projection operator in the z direction. spin_calculator :",
"\"roc\": def jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host = jit_host",
"2] = operator[0, 2] result[1, 2] = operator[1, 2] result[2, 2] = operator[2,",
"just in time using the JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates the",
"= ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)",
"# cisz = math.cos(z) + 1j*math.sin(z) # result[0, 0] = (cx*cy - 1j*sx*sy)/cisz",
"units of s. This is an output, so use an empty :class:`numpy.ndarray` with",
"= \"half_step\" \"\"\" Integration method from AtomicPy. Makes two Euler integration steps, one",
"matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) result[0, 0] +=",
"inner = utilities.inner set_to = utilities.set_to set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply",
"spin quantum number. dimension : :obj:`int` Dimension of the hilbert space the states",
"The initial quantum state of the spin system, written in terms of the",
"else: result[0, 0] = 1 result[1, 0] = 0 result[0, 1] = 0",
"for each time sampled. Units of :math:`\\\\hbar`. This is an output, so use",
"the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there just in time",
"\\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For",
":class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation is",
"# cx = math.cos(x) # sx = math.sin(x) # cy = math.cos(y) #",
"self.set_to = set_to self.set_to_one = set_to_one self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply self.adjoint",
"to use the rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`,",
"np import numba as nb from numba import cuda from numba import roc",
"+ left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 +",
"---------- sweep_parameter : :obj:`float` The input to the `get_field` function supplied by the",
"c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin one systems** Assumes the",
"number. .. math:: \\\\begin{align*} (a + ib)^* &= a - ib\\\\\\\\ a, b",
"dot) product between two complex vectors. .. note:: The mathematics definition is used",
"frequencies during the simulations that `spinsim` was designed for. * **field_sample** (:class:`numpy.ndarray` of",
"when using :obj:`Device.CUDA` as the target device, and can be modified to increase",
"get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64,",
"1: time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128) elif device_index == 2: time_evolution_old_group",
"J_x + y J_y + z J_z + q J_q), \\\\end{align*} with ..",
"Compiled for chosen device on object constrution. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128`",
"state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\" Calculates the expected spin projection (Bloch vector)",
"- 1 if device_index == 0: temporary = np.empty((2, 2), dtype = np.complex128)",
"self.jit_host = jit_host def jit_device(func): return cuda.jit(device = True, inline = True)(func) self.jit_device",
"norm2(z) : :obj:`callable` The 2 norm of a complex vector. .. math:: \\|a",
"= np.empty(sample_index_end, dtype = np.complex128) elif device_index == 1: time_evolution_fine = cuda.local.array((dimension, dimension),",
"values for when the experiment is to start and finishes. In units of",
"number of threads (workitems) they each contain, when running on the GPU target",
"take the conjugate of. Returns * **cz** (:class:`numpy.complex128`) - The conjugate of z.",
"result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy) # result[1, 0] = cisz*(-1j*sx",
"to integrate states in the rotating frame, using the rating wave approximation: just",
"left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device def",
"0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) - 1",
"time. \"\"\" self.time = time self.time_evolution = time_evolution self.state = state self.spin_calculator =",
"hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0: # hyper_cube_amount = 0 #",
"math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1])",
"if use_rotating_frame: # Take out of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output)",
"was evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset",
"* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply",
"time that the experiment is to finish at. Measured in s. * **time_step_integration**",
"**result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of",
":], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):",
"GPU model. Defaults to 63 (optimal for GTX1070, the device used for testing.",
"example, it is used to sweep over dressing frequencies during the simulations that",
"simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an",
"rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample",
"nb.njit()(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template",
"2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1",
"0 \\\\\\\\ 0 & 0 & -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 &",
"- state[time_index, 1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real",
"in the z direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected",
"0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0,",
"of :obj:`numpy.float64` (time_index)) - The times that `state` was evaluated at. * **time_end_points**",
"within the integration algorithm. In units of s. time_step_output : :obj:`float` The time",
"---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin wavefunction) of the system",
"time step. The equivalent of the trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The",
"round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an integer multiple of time_step_integration. Resetting",
"integration_method : :obj:`IntegrationMethod` Which integration method to use in the integration. Defaults to",
"an AMD ROCm compatible GPU, in parallel. .. warning :: Work in progress,",
"sweeping through bias values, by calling this method multiple times, each time varying",
"time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype =",
"object containing the results of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) >",
"max_registers) def get_spin(state, spin): \"\"\" Calculate each expected spin value in parallel. For",
"__init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to",
"of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z respectively,",
"&= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0 \\\\\\\\ i & 0 & -i",
":obj:`str` A text label that can be used for archiving. \"\"\" def __init__(self,",
"1j*math.sin(eq) # Ca = 1 # Sa = a/2 # ca = 1",
"system. Parameters ---------- value : :obj:`float` The numerical value of the spin quantum",
"\\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters:",
"2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint",
"time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if dimension >",
"can be swept over when multiple simulations need to be run. For example,",
"state timeseries of the 3 level atom. Parameters ---------- state_init : :class:`numpy.ndarray` of",
"- the time to sample the field at, in units of s. *",
"0] = conj(operator[0, 1]) result[2, 0] = conj(operator[0, 2]) result[0, 1] = conj(operator[1,",
"state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2",
"(\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y -",
"implementation to use for matrix exponentiation within the integrator. Parameters ---------- value :",
"`time_end - time_start`. time_step_integration : :obj:`float` The integration time step. Measured in s.",
"1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2",
"0: # hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/(2*precision)",
"For example, if the `sweep_parameter` is used to define the bias field strength",
"operator in the z direction. Returns ------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index,",
"0] + left[1, 1]*right[1, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0,",
"y J_y + z J_z + q J_q), \\\\end{align*} with .. math:: \\\\begin{align*}",
"from numba import cuda from numba import roc import math sqrt2 = math.sqrt(2)",
"power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary) # matrix_multiply(temporary,",
"one systems** Assumes the exponent is an imaginary linear combination of a subspace",
"the 2 norm of. Returns * **nz** (:class:`numpy.float64`) - The 2 norm of",
"1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if dimension > 2:",
"+ left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2] result[2, 2] = left[2, 0]*right[0,",
"(:obj:`int`) - The number of squares to make to the approximate matrix (:math:`\\\\tau`",
"time_start : :obj:`float` The time offset that the experiment is to start at.",
"= math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1,",
"state (wavefunction) of the spin system in the lab frame, for each time",
"\\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated, it is then recursively squared :math:`\\\\tau` times",
"GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" if not device: if",
"The time offset that the experiment is to start at. Measured in s.",
"nb from numba import cuda from numba import roc import math sqrt2 =",
"between each time step. See :ref:`architecture` for some information. spin_calculator : :obj:`callable` Calculates",
"get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = time_fine + time_step_integration - time_coarse rotating_wave_winding[1] =",
"ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex",
"1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index, 1]",
": :obj:`callable` The absolute value of a complex number. .. math:: \\\\begin{align*} |a",
".. note:: The mathematics definition is used here rather than the physics definition,",
"field that the spin system is being put under. It must have three",
"and can be modified to increase the execution speed for a specific GPU",
"time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could not jit get_field function into",
"See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so use an empty :class:`numpy.ndarray`",
"as .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz",
"exponent is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(3)`, being, ..",
"allocated per thread when using :obj:`Device.CUDA` as the target device, and can be",
": :obj:`str` A text label that can be used for archiving. \"\"\" MAGNUS_CF4",
"can run many simulations, sweeping through bias values, by calling this method multiple",
"Thus the inner product of two orthogonal vectors is 0. .. math:: \\\\begin{align*}",
"operator between each time step. See :ref:`architecture` for some information. spin_calculator : :obj:`callable`",
"spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number",
"if cuda.is_available(): device = Device.CUDA else: device = Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number",
"1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq) # Ca = 1",
"the integrator to run on an AMD ROCm compatible GPU, in parallel. ..",
":func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time (1))",
"result): x = field_sample[0] y = field_sample[1] z = field_sample[2] r = math.sqrt(x**2",
"operator[0, 1] = 0 operator[1, 1] = 1 @jit_device def set_to_zero(operator): operator[0, 0]",
"x_index)) - The matrix to set to :math:`1`. set_to_zero(operator) : :obj:`callable` Make a",
"temporary = np.empty((3, 3), dtype = np.complex128) # elif device_index == 1: #",
"2] = conj(operator[2, 0]) result[1, 2] = conj(operator[2, 1]) result[2, 2] = conj(operator[2,",
":obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details. trotter_cutoff : :obj:`int` The number of squares",
"(spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use an analytic exponentiation method outside of",
"0 & 0 \\\\\\\\ 0 & 0 & -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix}",
"attr_name == \"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin raise AttributeError(\"{}",
"= np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine =",
"2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta).",
"(workgroup), in terms of the number of threads (workitems) they each contain, when",
"is used during the integration. Parameters ---------- value : :obj:`str` A text label",
"systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options for describing which method is used during",
"spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator will integrate a",
"w1 = (1.5 - sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0])",
":], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):",
"model. This means that if more registers are allocated than are available for",
"rather than the physics definition, so the left vector is conjugated. Thus the",
"utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device = device.jit_device jit_device_template = device.jit_device_template device_index = device.index",
"= utilities.norm2 inner = utilities.inner set_to = utilities.set_to set_to_one = utilities.set_to_one set_to_zero =",
"performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" if",
"compilable python features, and `Supported Numpy features`_ for compilable numpy features. \"\"\" CUDA",
": :obj:`callable` Make a matrix the multiplicative identity, ie, :math:`1`. .. math:: \\\\begin{align*}",
"1])*operator[1, 2] + operator[1, 2]*operator[2, 2] result[2, 2] = operator[2, 0]*operator[0, 2] +",
"more details. use_rotating_frame : :obj:`bool` Whether or not to use the rotating frame",
"3, \"one\") \"\"\" For three level systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options for",
"0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix",
"2]*right[2, 0] result[2, 0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] +",
"matrix to right multiply by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"if dimension > 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse)",
"spatial_direction)) - The expected spin projection (Bloch vector) over time. \"\"\" self.time =",
":], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE:",
"complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real",
"time_end_points = np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] -",
"= None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block =",
"dependent Schroedinger equation and returns the quantum state of the spin system over",
"time for different GPU models. device : :obj:`Device` The option to select which",
"\"\"\" Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to run on",
"Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:,",
"\\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin one systems** Assumes the exponent is",
"a/precision Ca = math.cos(a/2) Sa = math.sin(a/2) ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2",
"\\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to",
"+ i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &=",
"= a/precision Ca = math.cos(a/2) Sa = math.sin(a/2) ca = math.cos(a) sa =",
"time_evolution_coarse) results = Results(time, time_evolution_coarse, state, self.spin_calculator) return results @staticmethod @nb.njit def get_state(state_init,",
"_Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index): super().__init__() self._value_ =",
"\"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile",
".. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def",
"frame. One can, of course, use :mod:`spinsim` to integrate states in the rotating",
"iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &=",
"device.index dimension = spin_quantum_number.dimension lie_dimension = dimension + 1 # utility_set = spin_quantum_number.utility_set",
"= threads_per_block self.spin_quantum_number = spin_quantum_number self.device = device self.get_time_evolution_raw = None self.get_spin_raw =",
"= X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating",
"& 0 & 1 \\\\\\\\ 0 & 1 & 0 \\\\end{pmatrix},& J_y &=",
"of the system at the start of the simulation. state : :class:`numpy.ndarray` of",
"time that the experiment is to finish at. Measured in s. The duration",
"the device used for testing. Note that one extra register per thread is",
"of the eigenstates of the spin projection operator in the z direction. spin_calculator",
"(:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number :",
"2] + operator[1, 2]*operator[2, 2] result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2,",
":obj:`float` The time that the experiment is to finish at. Measured in s.",
"is to start and finishes. In units of s. time_step_integration : :obj:`float` The",
"matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0] += 1 result[1, 1] += 1 result[2,",
"= jit_host def jit_device(func): return func self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func):",
"set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`.",
"2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 -",
"axis by an amount defined by the field in the z direction. This",
"1] = conj(operator[1, 2]) result[0, 2] = conj(operator[2, 0]) result[1, 2] = conj(operator[2,",
"smaller steps. .. note :: The use of a rotating frame is commonly",
"(2 + operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0,",
"+ operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0] result[1, 0] = operator[1, 0]*operator[0,",
"integrator, ie, don't compile the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use",
"get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the stepwise time evolution opperator.",
"index): super().__init__() self._value_ = value self.index = index if value == \"python\": def",
"result of the exponentiation is to be written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable`",
"ez = 1 + 1j*ez # eq = field_sample[3]/(6*precision) # eq = 1",
"math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if",
"device_index == 1: temporary = cuda.local.array((2, 2), dtype = np.complex128) elif device_index ==",
"return (z.real - 1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number",
"integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter,",
"math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z -",
"information. \"\"\" for time_index in range(state.shape[0]): # State = time evolution * previous",
"z = field_sample[2]/precision # q = field_sample[3]/precision # cx = math.cos(x) # sx",
"np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)",
"- 1j*math.sin(z + q/3) # result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy)",
"= IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers =",
"+ w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2, 2]",
"self.device == Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)",
"(time_index, y_index, x_index) The evaluated time evolution operator between each time step. See",
"state, time_evolution): \"\"\" Use the stepwise time evolution operators in succession to find",
"1])*operator[1, 1] + operator[1, 2]*operator[2, 1] result[2, 1] = operator[2, 0]*operator[0, 1] +",
"transform_frame = transform_frame_lab get_field_jit = jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64,",
"F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one:",
"2: temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :,",
"time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif",
"temporary) # matrix_multiply(temporary, temporary, result) else: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2",
"compiler to compile the integrator to run on a single CPU core. ..",
"== \"cuda\": def jit_host(template, max_registers): def jit_host(func): return cuda.jit(template, debug = False, max_registers",
"0 operator[1, 1] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0] =",
"math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :],",
"63 (optimal for GTX1070, the device used for testing. Note that one extra",
"elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def",
"time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128) elif device_index == 2: time_evolution_old_group =",
"return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0, 0] =",
"append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :],",
"def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu\":",
"\\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t)",
"the current and next timesteps, for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`.",
"field_sample[0, :]) time_sample = time_fine + time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) +",
"\\\\\\\\ 0 & 0 & 0 \\\\\\\\ 0 & 0 & -1 \\\\end{pmatrix},&",
"features. \"\"\" CPU = (\"cpu\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to",
"1j*ez # eq = field_sample[3]/(6*precision) # eq = 1 + 1j*eq result[0, 0]",
"0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1]",
"offset that the experiment is to start at. Measured in s. time_end :",
"math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on object",
"can be calculated as .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy",
"= roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size +",
"1] = conj(operator[1, 1]) result[2, 1] = conj(operator[1, 2]) result[0, 2] = conj(operator[2,",
"the field, which increases the accuracy of the output since the integrator will",
"removes the (possibly large) z component of the field, which increases the accuracy",
"def __init__(self, value, index): super().__init__() self._value_ = value self.index = index ANALYTIC =",
"(threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif device",
"= ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine,",
"= roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group",
"Only available for use with spin half systems. Will not work with spin",
"of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply by. * **right**",
"return jit_host self.jit_host = jit_host def jit_device(func): return cuda.jit(device = True, inline =",
"(optimal for GTX1070, the device used for testing. Note that one extra register",
"result[0, 0] = c - 1j*z*s result[1, 0] = (y - 1j*x)*s result[0,",
"MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration method. \"\"\" HALF_STEP = \"half_step\" \"\"\" Integration",
"on the target device) used in the integrator. These device functions are compiled",
"in the inner product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector",
"eigenstates of the spin projection operator in the z direction. Returns ------- results",
":func:`numba.jit()` LLVM compiler to compile the integrator to run on all CPU cores,",
"&= -i(x J_x + y J_y + z J_z), \\\\end{align*} with .. math::",
"time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state` was evaluated at.",
"+ 0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample,",
"device_index == 0: time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128) elif device_index ==",
"result[2, 0] = operator[2, 0] result[0, 1] = operator[0, 1] result[1, 1] =",
"- iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z -",
":, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0] +=",
":math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index))",
"result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx + cy",
"0: time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128) elif device_index == 1: time_evolution_old",
"1: temporary = cuda.local.array((3, 3), dtype = np.complex128) elif device_index == 2: temporary_group",
"identity, ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases}",
"each time sampled. Units of :math:`\\\\hbar`. This is an output, so use an",
"&= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`)",
"transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab",
"of the simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction)",
"to 63 (optimal for GTX1070, the device used for testing. Note that one",
"def matrix_exponential_analytic(field_sample, result): x = field_sample[0] y = field_sample[1] z = field_sample[2] r",
"if the `sweep_parameter` is used to define the bias field strength in `get_field`,",
"* **time_step_integration** (:obj:`float`) - The integration time step. Measured in s. * **time_step_output**",
"spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index,",
"math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &=",
"in s. * **time_step_output** (:obj:`float`) - The sample resolution of the output timeseries",
"== 0: for time_index in nb.prange(spin.shape[0]): if dimension == 2: spin[time_index, 0] =",
"x_index, z_index]*state[time_index - 1, z_index] else: state[time_index, x_index] += state_init[x_index] sqrt2 = math.sqrt(2)",
"inline = True)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"roc\": def",
"for compilable python features. \"\"\" ROC = (\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()`",
"One can, of course, use :mod:`spinsim` to integrate states in the rotating frame,",
"= math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1,",
"no such approximations are made, and the output state in given out of",
"rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit = jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64,",
"\"\"\" Find the stepwise time evolution opperator. Parameters ---------- sweep_parameter : :obj:`float` time_coarse",
"def __getattr__(self, attr_name): if attr_name == \"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name, spin)",
"the rotating wave approximation in the rotating frame. integration_method : :obj:`IntegrationMethod` Which integration",
"= sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1, 1] =",
"J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z",
":obj:`float` The input to the `get_field` function supplied by the user. Modifies the",
"2), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount):",
"0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2] result[2, 2] =",
"result): result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[2, 0]",
"Defaults to :obj:`Device.CUDA` if the system it is being run on is Nvidia",
"time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could not jit get_field function into",
"+ 1j*math.sin(eq) result[0, 0] = Ca/ez - 1 result[1, 0] = Sa*ep result[0,",
"math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1])",
"the expected spin projection (Bloch vector) over time for a given time series",
"compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_",
"number of a system. Parameters ---------- value : :obj:`float` The numerical value of",
"z. complex_abs(z) : :obj:`callable` The absolute value of a complex number. .. math::",
"&\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y",
"threads_per_block): \"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the",
"device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) #",
"for hyperfine spin of the spin system in the lab frame, for each",
"compiled for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html ..",
"thread, meaning slower memory must be used. Thus, there will be an optimal",
"is not done when this option is set to :obj:`True` - no such",
"y = field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) # cx = math.cos(x) # sx",
"(2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0]",
"rotating wave approximation, a technique used to get approximate analytic solutions of spin",
"quantum system. device : :obj:`Device` The option to select which device will be",
"def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[2, 0] =",
"be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported",
"0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\" Calculates the",
"(2 + operator[1, 1])*operator[1, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1]",
"state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state of the spin",
"0]) result[1, 0] = conj(operator[0, 1]) result[0, 1] = conj(operator[1, 0]) result[1, 1]",
"**field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field. This is",
"2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2",
"product of two orthogonal vectors is 0. .. math:: \\\\begin{align*} l \\\\cdot r",
"at the start of the simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)",
"dimension), dtype = np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding =",
"of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over",
"time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64)",
"np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block -",
"s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i",
"to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. threads_per_block :",
"i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2 + y^2 + z^2}`. Parameters: *",
"time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 1: # Run calculation",
"operator[0, 2] = 0 operator[1, 2] = 0 operator[2, 2] = 0 @jit_device",
"& -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y +",
"self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device == Device.CUDA:",
"Use pure python interpreted code for the integrator, ie, don't compile the integrator.",
"over when multiple simulations need to be run. For example, it is used",
":math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x + y J_y +",
"- The sample resolution of the output timeseries for the state. Must be",
"== 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index,",
"- 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except:",
"device that the integrator is being compiled for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html",
"function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable python",
"device) used in the integrator. These device functions are compiled for the chosen",
"field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0]",
"step. See :ref:`architecture` for some information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)",
"said to have less occupancy. Lowering the value increases GPU occupancy, meaning more",
"These device functions are compiled for the chosen target device on construction of",
"threads concurrently than it has Cuda cores, meaning some cores are inactive, and",
"2].real**2 - state[time_index, 2].imag**2 elif device_index > 0: if device_index == 1: time_index",
"= device.index dimension = spin_quantum_number.dimension lie_dimension = dimension + 1 # utility_set =",
"self.spin raise AttributeError(\"{} has no attribute called {}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes",
"work with spin one systems. Assumes the exponent is an imaginary linear combination",
"1] = operator[1, 1] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0]",
"q/3) # result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy) # result[1, 0]",
"to :obj:`True`, the integrator moves into a frame rotating in the z axis",
"(:obj:`float`) - The input to the `get_field` function supplied by the user. Modifies",
"ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse):",
"Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left",
"the spin system in the lab frame, for each time sampled. See :math:`\\\\psi(t)`",
"-1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq",
"get_spin(state, spin) elif device == Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid =",
"Magnus based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration method. \"\"\" HALF_STEP",
"@jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample,",
"ez = math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq)",
"matrix the additive identity, ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i, j} = 0",
"norm of. Returns * **nz** (:class:`numpy.float64`) - The 2 norm of z. inner(left,",
"Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value,",
"\\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau},",
"return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device def inner(left, right): return",
":] # for power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary,",
"np.empty((dimension, dimension), dtype = np.complex128) elif device_index == 1: time_evolution_old = cuda.local.array((dimension, dimension),",
"0])*operator[0, 0] + operator[0, 1]*operator[1, 0] result[1, 0] = operator[1, 0]*operator[0, 0] +",
"= field_sample[2] r = math.sqrt(x**2 + y**2 + z**2) if r > 0:",
"np.complex128) elif device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)",
"= (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index == 0: temporary = np.empty((3, 3), dtype",
"-\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with",
"1] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0] = left[0, 0]*right[0,",
"= 0.5*cisz*(cx - cy - 1j*sx*sy) # cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) #",
"time_index in nb.prange(spin.shape[0]): if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real",
"2 norm of. Returns * **nz** (:class:`numpy.float64`) - The 2 norm of z.",
"time_sample = time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample",
"result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] result[1,",
"((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample",
"spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int =",
"time_evolution, state, spin_calculator): \"\"\" Parameters ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The",
"Premultiply to the exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if",
"(Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index == 0: temporary = np.empty((3, 3), dtype =",
"the integrator. These device functions are compiled for the chosen target device on",
"series of a quantum state. Used to calculate `spin` the first time it",
"= 0 if time_index > 0: for z_index in range(state.shape[1]): state[time_index, x_index] +=",
"\\\\end{align*} with :math:`r = \\\\sqrt{x^2 + y^2 + z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray`",
"np.empty((3, 3), dtype = np.complex128) # elif device_index == 1: # temporary =",
"left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0] result[1, 0]",
"- The inner product of l and r. set_to(operator, result) : :obj:`callable` Copy",
":])\", max_registers = max_registers) def get_spin(state, spin): \"\"\" Calculate each expected spin value",
"device = Device.CUDA else: device = Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number",
"being avaliable to each thread, meaning slower memory must be used. Thus, there",
"np.complex128) # elif device_index == 1: # temporary = cuda.local.array((2, 2), dtype =",
"append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64)\") def",
"device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension),",
"For every fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index],",
"\\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y",
"for. See :class:`Device` for more information and links. spin_quantum_number : :obj:`SpinQuantumNumber` The option",
"\\\\\\\\ 0 & 0 & 1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be",
"# Ca = 1 # Sa = a/2 # ca = 1 #",
"power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) else: @jit_device",
"integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.HALF_STEP:",
"LLVM compiler to compile the integrator to run on an AMD ROCm compatible",
"(y_index, x_index)) - The matrix to left multiply by. * **right** (:class:`numpy.ndarray` of",
"= time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device == Device.ROC: time = roc.device_array(time_index_max, np.float64)",
"defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. get_time_evolution_raw",
"field function so the integrator can be used for many experiments, without the",
"0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0,",
"\"\"\" Approximation using the Lie Trotter theorem. \"\"\" class Device(Enum): \"\"\" The target",
"0] + left[1, 2]*right[2, 0] result[2, 0] = left[2, 0]*right[0, 0] + left[2,",
"time, written in terms of the eigenstates of the spin projection operator in",
"of z. inner(left, right) : :obj:`callable` The inner (maths convention dot) product between",
"math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot r",
"expected spin projection (Bloch vector) over time for a given time series of",
"rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] =",
"device_index == 0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse,",
"specified for control, so really this number is 64). Raising this value allocates",
"field functions that use the rotating wave approximation in the rotating frame. integration_method",
"are made, and the output state in given out of the rotating frame.",
"be compilable for the device that the integrator is being compiled for. See",
"= 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a > 0: ep =",
"s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau},",
"start of the time step, one sampling the field from the end of",
"value == \"roc\": def jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host",
"adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based on",
"matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic = matrix_exponential_analytic self.matrix_exponential_lie_trotter = matrix_exponential_lie_trotter self.matrix_square_residual = matrix_square_residual",
"exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine,",
"3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration =",
"== 0: time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128) elif device_index == 1:",
"Multiply matrices left and right together, to be returned in result. .. math::",
"AttributeError(\"{} has no attribute called {}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes ---------- spin_quantum_number",
"simulations need to be run. For example, it is used to sweep over",
"\\\\\\\\ 0 & 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i",
"float64[:], float64, float64, float64[:], complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration,",
"= \\\\sqrt{x^2 + y^2 + z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index,",
"The expected value for hyperfine spin of the spin system in the lab",
"with .. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\",
"of the field. This is a four dimensional vector, with the first three",
"0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1] result[1, 1] =",
"> 0: x /= r y /= r z /= r c =",
"append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame: #",
"import math sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for",
"0] = 0 operator[0, 1] = 0 operator[1, 1] = 1 operator[2, 1]",
"0 & 1 & 0 \\\\\\\\ 1 & 0 & 1 \\\\\\\\ 0",
"2 norm of a complex vector. .. math:: \\|a + ib\\|_2 = \\\\sqrt",
"time = time.copy_to_host() elif self.device == Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse =",
"float64[:], float64, float64, complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output,",
"matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) self.conj = conj self.complex_abs = complex_abs",
"debug = False, max_registers = max_registers)(func) return jit_host self.jit_host = jit_host def jit_device(func):",
"field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave,",
"A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or",
"s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2}",
":, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :]",
"+ state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif device_index > 0:",
"field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration",
"field, for example), and the fourth entry being the amplitude of the quadratic",
"imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A",
"iz J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz",
"\\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x",
"= operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator,",
"and more details. exponentiation_method : :obj:`ExponentiationMethod` Which method to use for matrix exponentiation",
"expected value for hyperfine spin of the spin system in the lab frame,",
"append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :],",
"This is not done when this option is set to :obj:`True` - no",
"matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based on its analytic",
"value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads",
"that one extra register per thread is always added to the number specified",
"steps. .. note :: The use of a rotating frame is commonly associated",
"of course, use :mod:`spinsim` to integrate states in the rotating frame, using the",
"time offset that the experiment is to start at. Measured in s. time_end",
"\"magnus_cf4\" \"\"\" Commutator free, fourth order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\"",
"spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index,",
"def __init__(self, value, index): super().__init__() self._value_ = value self.index = index if value",
"= field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample,",
"field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration",
"described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which",
"0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index,",
"Numpy features`_ for compilable numpy features. \"\"\" CUDA = (\"cuda\", 1) \"\"\" Use",
"= -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if",
"1] = 0 operator[1, 1] = 0 operator[2, 1] = 0 operator[0, 2]",
"l and r. set_to(operator, result) : :obj:`callable` Copy the contents of one matrix",
"declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0)",
"operator in the z direction. Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction))",
"over time for a given time series of a quantum state. Used to",
"rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X",
"time_end_points[0])/time_step_output) if self.device.index == 0: time = np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension,",
"def jit_host(func): return cuda.jit(template, debug = False, max_registers = max_registers)(func) return jit_host self.jit_host",
"end time (1)) The time values for when the experiment is to start",
"field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128)",
"get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave,",
"set_to(operator, result) : :obj:`callable` Copy the contents of one matrix into another. ..",
"could increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation.",
"compiler to compile the integrator to run on all CPU cores, in parallel.",
"(time_index) A coarse grained list of time samples that the time evolution operator",
"1].real**2 - state[time_index, 1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index,",
"= left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2] result[2,",
"`time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution operator (matrix)",
"to increase execution time for different GPU models. \"\"\" jit_device = device.jit_device device_index",
"time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1]",
"e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\",
"& 1 & 0 \\\\\\\\ 1 & 0 & 1 \\\\\\\\ 0 &",
"of a complex number. .. math:: \\\\begin{align*} |a + ib| &= \\\\sqrt{a^2 +",
"by the field in the z direction. This removes the (possibly large) z",
"Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator will",
"\\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i & 0 \\\\end{pmatrix},& J_z",
"of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state of the spin system, written in",
"complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit = jit_device(get_field) if",
"used when compiling the integrator, where higher level objects like enums cannot be",
": :obj:`callable` Takes the hermitian adjoint of a matrix. .. math:: \\\\begin{align*} A^\\\\dagger",
"time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that",
"np.complex128) elif device_index == 1: temporary = cuda.local.array((3, 3), dtype = np.complex128) elif",
"math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0,",
"**z** (:class:`numpy.complex128`) - The complex number to take the conjugate of. Returns *",
"time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64,",
"def jit_device_template(template): def jit_device_template(func): return roc.jit(template, device = True)(func) return jit_device_template self.jit_device_template =",
"np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin)",
":] # Calculate the exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index",
"2] + left[1, 2]*right[2, 2] result[2, 2] = left[2, 0]*right[0, 2] + left[2,",
"operator[2, 2] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0",
"an amount defined by the field in the z direction. This removes the",
":obj:`True`. If set to :obj:`True`, the integrator moves into a frame rotating in",
"J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\",
"device = Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number self.device = device self.get_time_evolution_raw",
"left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0] result[1, 0] = left[1, 0]*right[0, 0]",
"function must be compilable for the device that the integrator is being compiled",
":class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`0`. matrix_multiply(left, right, result)",
"in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"= math.sqrt(x**2 + y**2 + z**2) if r > 0: x /= r",
"the eigenstates of the spin projection operator in the z direction. Returns -------",
"Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number self.device = device self.get_time_evolution_raw = None",
"time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers = max_registers) def",
"python features, and `Supported Numpy features`_ for compilable numpy features. \"\"\" CPU =",
"every fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration,",
"a specific GPU model. Defaults to 63 (optimal for GTX1070, the device used",
"def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit = jit_device(get_field) if integration_method",
"+ operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0] result[2, 0] = operator[2, 0]*operator[0,",
":]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse,",
"grained list of time samples that the time evolution operator is found for.",
"+ operator[1, 2]*operator[2, 1] result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1,",
"== 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to the exitsing time evolution operator",
"take the adjoint of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An",
"+ b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to",
"cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy) #",
"= utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device = device.jit_device jit_device_template =",
"math.cos(a/2) Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) #",
"See :obj:`Device` for all options and more details. threads_per_block : :obj:`int` The size",
":obj:`callable` Takes the hermitian adjoint of a matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv",
"of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z (and",
"solutions of spin system dynamics. This is not done when this option is",
"This is calculated just in time using the JITed :obj:`callable` `spin_calculator`. spin_calculator :",
"2] = math.tau*time_step_integration*field_sample[0, 2] if dimension > 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3]",
"math:: \\\\exp(A + B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half",
"self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return func return jit_device_template self.jit_device_template =",
"= (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2 +",
"time_evolution_coarse, sweep_parameter) elif device_index == 1: # Run calculation for each coarse timestep",
"time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1))",
"Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of",
"None, exponentiation_method = None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32,",
"2] = conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample,",
"the z direction. Returns ------- results : :obj:`Results` An object containing the results",
"def get_spin(state, spin): \"\"\" Calculate each expected spin value in parallel. For spin",
"+ z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device def cross(left, right, result): result[0] =",
"= np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index == 2: time_evolution_fine_group",
"J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & -2 &",
"= math.sin(a/2) ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez =",
"0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0] result[2,",
"1, z_index] else: state[time_index, x_index] += state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3)",
"x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y)",
"time step. See :ref:`architecture` for some information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index,",
"made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int` The",
"1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 + operator[2,",
"(index)) - The vector to left multiply in the inner product. * **right**",
"at. Measured in s. The duration of the experiment is `time_end - time_start`.",
"to run on a single CPU core. .. note :: To use this",
"@jit_device def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def",
"of the time step, one sampling the field from the end of the",
"cores, meaning some cores are inactive, and the GPU is said to have",
"(c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y)",
"are compiled for the chosen target device on construction of the object. Attributes",
"rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] =",
"for each coarse timestep in parallel time_index = roc.get_global_id(1) if time_index < time_coarse.size:",
"to have less occupancy. Lowering the value increases GPU occupancy, meaning more threads",
"= math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample =",
"rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] = X.imag",
"Compiles the integrator and spin calculation functions of the simulator. Parameters ---------- get_field",
"operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator, result):",
"if device_index == 0: time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128) elif device_index",
"using :obj:`Device.CUDA` as the target device, and can be modified to increase the",
"b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to take",
"= left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0] result[1,",
"of two orthogonal vectors is 0. .. math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv",
"= 0 operator[1, 0] = 0 operator[2, 0] = 0 operator[0, 1] =",
"sample rate of the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128`",
"0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 return def spin_calculator(state):",
"magnetic_quantum_number) The quantum state of the spin system over time, written in terms",
"a matrix exponential based on the Lie Product Formula, .. math:: \\\\exp(A +",
"dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((3, 3), dtype =",
"vector) over time for a given time series of a quantum state. Parameters",
"= (\"lie_trotter\", 1) \"\"\" Approximation using the Lie Trotter theorem. \"\"\" class Device(Enum):",
"field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration",
":obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details. use_rotating_frame",
"to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details.",
"chosen device on object constrution. Parameters: * **sweep_parameter** (:obj:`float`) - The input to",
"jit_host def jit_device(func): return nb.njit()(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return",
"time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 2: # Run calculation",
"= math.sin(x) # cy = math.cos(y) # sy = math.sin(y) # cisz =",
"1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return spin self.get_time_evolution_raw =",
"J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z",
"result[1, 0] = operator[1, 0] result[2, 0] = operator[2, 0] result[0, 1] =",
"dtype = np.complex128) elif device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype",
"y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y",
"self.matrix_multiply = matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic = matrix_exponential_analytic self.matrix_exponential_lie_trotter = matrix_exponential_lie_trotter self.matrix_square_residual",
"\\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &=",
"some information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum state",
"for matrix exponentiation within the integrator. Parameters ---------- value : :obj:`str` A text",
"result[2, 1] = operator[2, 1] result[0, 2] = operator[0, 2] result[1, 2] =",
"one systems. Assumes the exponent is an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being,",
"@jit_device def set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0] = operator[1,",
"& i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 & 0 & 0",
"that describes the field that the spin system is being put under. It",
"contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64.",
"is conjugated. Thus the inner product of two orthogonal vectors is 0. ..",
"result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1] @jit_device def set_to_one(operator):",
"The matrix to left multiply by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"math.cos(z + q/3) - 1j*math.sin(z + q/3) # result[0, 0] = 0.5*cisz*(cx +",
"time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse grained list of time samples",
"field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if dimension > 2: field_sample[2,",
"time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse",
"np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128) elif device_index == 1: time_evolution_fine =",
"(1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index,",
"python function that describes the field that the spin system is being put",
"matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to the exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old)",
":class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from. * **result** (:class:`numpy.ndarray` of",
"hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount = 0 precision = 4**hyper_cube_amount",
"**cz** (:class:`numpy.complex128`) - The conjugate of z. complex_abs(z) : :obj:`callable` The absolute value",
"conj(operator[0, 1]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) @jit_device",
"for more details. trotter_cutoff : :obj:`int` The number of squares made by the",
"whole number multiple of `time_step_integration`. Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128`",
"# result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy) # result[1, 2] =",
"/= r y /= r z /= r c = math.cos(r/2) s =",
":], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2, 1]",
"1] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0] =",
"\"\"\" # from . import utilities from enum import Enum import numpy as",
"&= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x)",
"\\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y",
"of the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index,",
"def conj(z): return (z.real - 1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2)",
"at the expense of fewer resgiters being avaliable to each thread, meaning slower",
"append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\")",
"state for x_index in nb.prange(state.shape[1]): state[time_index, x_index] = 0 if time_index > 0:",
"= cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128) elif",
"time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64,",
"finish at. Measured in s. * **time_step_integration** (:obj:`float`) - The integration time step.",
"- The time offset that the experiment is to start at, and the",
"no attribute called {}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber`",
"= ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)",
"= (field_sample[0] + 1j*field_sample[1])/a else: ep = 1 a = a/precision Ca =",
"0] = 1 operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1]",
"= 0.5*cisz*(cx - cy + 1j*sx*sy) # result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2",
"slower memory must be used. Thus, there will be an optimal value of",
"if use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave =",
"time_evolution_coarse, sweep_parameter) elif device_index == 2: # Run calculation for each coarse timestep",
"== 0: spin = np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif device == Device.CUDA:",
"resolution of the output timeseries for the state. Must be a whole number",
"= transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0]",
"the `sweep_parameter` is used to define the bias field strength in `get_field`, then",
"an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing",
"the hermitian adjoint of a matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x}",
"iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau}",
":obj:`callable` Calculates a matrix exponential based on the Lie Product Formula, .. math::",
"= time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results = Results(time,",
"rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2,",
"matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) result[0,",
"4**hyper_cube_amount # x = field_sample[0]/precision # y = field_sample[1]/precision # z = field_sample[2]/precision",
"for evaluating the time evolution operator in parallel. Compiled for chosen device on",
"x_index)) - The matrix to right multiply by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"(state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host()",
"sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated time evolution",
"of :obj:`numpy.float128` (time_index, y_index, x_index) The evaluated time evolution operator between each time",
"roc.jit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return roc.jit(device = True)(func) self.jit_device",
"0: if device_index == 1: time_index = cuda.grid(1) elif device_index == 1: time_index",
"@jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator,",
"could not jit get_field function into a cuda device function.\\033[0m\\n\") raise time_evolution_coarse =",
"\"\"\" Commutator free, fourth order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\"",
"CUDA Python features`_ for compilable python features. \"\"\" ROC = (\"roc\", 2) \"\"\"",
"Numpy features`_ for compilable numpy features. \"\"\" CPU = (\"cpu\", 0) \"\"\" Use",
"belong to. label : :obj:`str` A text label that can be used for",
"+ operator[0, 1]*operator[1, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2 +",
"2])*operator[2, 1] result[0, 2] = (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1,",
"roc.jit(template, device = True)(func) return jit_device_template self.jit_device_template = jit_device_template PYTHON = (\"python\", 0)",
"if value == \"python\": def jit_host(template, max_registers): def jit_host(func): return func return jit_host",
"a Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :],",
"given time series of a quantum state. Parameters ---------- state : :obj:`numpy.ndarray` of",
"= utilities.set_to set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint =",
"entries being x, y, z spatial directions (to model a magnetic field, for",
"The time difference between each element of `time_coarse`. In units of s. Determines",
"- |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index)",
"increase execution time for different GPU models. \"\"\" jit_device = device.jit_device device_index =",
"for example), and the fourth entry being the amplitude of the quadratic shift",
"l, r \\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: *",
"math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1]",
"moves into a frame rotating in the z axis by an amount defined",
"Returns * **az** (:class:`numpy.float64`) - The absolute value of z. norm2(z) : :obj:`callable`",
"result[0, 2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2,",
"\\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one: .. math:: \\\\begin{align*}",
"a matrix the additive identity, ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i, j} =",
"method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def",
"&= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\",
"Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection",
"occupancy. Lowering the value increases GPU occupancy, meaning more threads run concurrently, at",
"executed there just in time if the `spin` property is needed. Compiled for",
"per thread is always added to the number specified for control, so really",
"a given time series of a quantum state. Used to calculate `spin` the",
": :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state` was evaluated at. time_evolution",
"self.spin_calculator) return results @staticmethod @nb.njit def get_state(state_init, state, time_evolution): \"\"\" Use the stepwise",
"2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2]",
"AtomicPy. Makes two Euler integration steps, one sampling the field from the start",
"time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :])",
"commonly associated with the use of a rotating wave approximation, a technique used",
"+ 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if",
"= 1 operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1] =",
"= 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index",
"device_index == 1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample = cuda.local.array((sample_index_max,",
"= math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2: field_sample[2,",
"Switching to a Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:],",
"operator in the z direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The",
"= math.cos(r/2) s = math.sin(r/2) result[0, 0] = c - 1j*z*s result[1, 0]",
":, :] # Calculate the exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif",
"* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled",
"2]) result[0, 2] = conj(operator[2, 0]) result[1, 2] = conj(operator[2, 1]) result[2, 2]",
"systems). .. note:: This function must be compilable for the device that the",
"result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) result[2, 1] =",
"with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0:",
"J_z &= \\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & 0 &",
"first three entries being x, y, z spatial directions (to model a magnetic",
"details. trotter_cutoff : :obj:`int` The number of squares made by the matrix exponentiator,",
"rotating frame is commonly associated with the use of a rotating wave approximation,",
"r = math.sqrt(x**2 + y**2 + z**2) if r > 0: x /=",
"elif value == \"cpu_single\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func) return jit_host",
"= 0 operator[0, 2] = 0 operator[1, 2] = 0 operator[2, 2] =",
"is a four dimensional vector, with the first three entries being x, y,",
"device will be targeted for integration. That is, whether the integrator is compiled",
"cuda.local.array((3, 3), dtype = np.complex128) # elif device_index == 2: # temporary_group =",
"s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y",
"math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount = 0 precision = 4**hyper_cube_amount a =",
"the left vector is conjugated. Thus the inner product of two orthogonal vectors",
"= None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers = 63): \"\"\" Compiles",
"jit_device_template self.jit_device_template = jit_device_template elif value == \"cuda\": def jit_host(template, max_registers): def jit_host(func):",
"can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index == 0: time =",
"* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to.",
"= 0 operator[0, 1] = 0 operator[1, 1] = 0 @jit_device def matrix_multiply(left,",
"the physics definition, so the left vector is conjugated. Thus the inner product",
"&= -i(x J_x + y J_y + z J_z + q J_q), \\\\end{align*}",
"elif device_index == 1: time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128) elif device_index",
"3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0]",
"different GPU models. device : :obj:`Device` The option to select which device will",
"self.jit_device_template = jit_device_template elif value == \"cpu_single\": def jit_host(template, max_registers): def jit_host(func): return",
"rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit = jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4:",
"result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left,",
"4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a > 0: ep = (field_sample[0]",
"from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy",
"units of s. time_step_output : :obj:`float` The time difference between each element of",
"time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter,",
"time_sample = time_fine + time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample",
"== 2: temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1),",
"user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for",
"elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end = 1 exponentiation_method_index = exponentiation_method.index",
"= operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] result[0, 1] =",
"x_index)) - A matrix to be filled with the result of the product.",
"- \\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y",
"def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cuda\":",
"threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator",
"temporary_group[roc.get_local_id(1), :, :] # for power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) #",
"this method multiple times, each time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64`",
"result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy) # cisz = math.cos(2*q/3) +",
"so the integrator can be used for many experiments, without the need for",
"utilities.conj complex_abs = utilities.complex_abs norm2 = utilities.norm2 inner = utilities.inner set_to = utilities.set_to",
"by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int` The size",
"lie_dimension), dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end),",
"spin = spin.copy_to_host() elif device == Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid",
"device that the integrator is being compiled for. See :class:`Device` for more information",
"+ 1j*sx*sy) # if device_index == 0: # temporary = np.empty((3, 3), dtype",
"the spin projection operator in the z direction. spin_calculator : :obj:`callable` Calculates the",
"where higher level objects like enums cannot be interpreted. \"\"\" def __init__(self, value,",
"in the lab frame, for each time sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128`",
"**simulation_index** (:obj:`int`) - a parameter that can be swept over when multiple simulations",
"# State = time evolution * previous state for x_index in nb.prange(state.shape[1]): state[time_index,",
"result): result[0, 0] = operator[0, 0] result[1, 0] = operator[1, 0] result[2, 0]",
"import numpy as np import numba as nb from numba import cuda from",
"device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba could not",
"the integration. Parameters ---------- value : :obj:`str` A text label that can be",
"time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could not jit get_field function into a",
"matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int` The size of each",
"= True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block =",
"0]) result[1, 1] = conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result): x = field_sample[0]",
"right): return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0, 0]",
"accuracy of the output since the integrator will on average take smaller steps.",
"which method is used during the integration. Parameters ---------- value : :obj:`str` A",
"eq = field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq) # Ca = 1 #",
"Parameters: * **sweep_parameter** (:obj:`float`) - The input to the `get_field` function supplied by",
"field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension >",
"conj self.complex_abs = complex_abs self.norm2 = norm2 self.inner = inner self.set_to = set_to",
"complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z): return",
"= utilities.conj complex_abs = utilities.complex_abs norm2 = utilities.norm2 inner = utilities.inner set_to =",
"left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1] result[2, 1] = left[2, 0]*right[0, 1]",
"matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount = 0",
"math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},&",
"time_index = roc.get_global_id(1) if time_index < spin.shape[0]: if dimension == 2: spin[time_index, 0]",
"combination of a subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x",
"self.index = index ANALYTIC = (\"analytic\", 0) \"\"\" Analytic expression of the matrix",
"temporary = cuda.local.array((2, 2), dtype = np.complex128) elif device_index == 2: temporary_group =",
"PYTHON = (\"python\", 0) \"\"\" Use pure python interpreted code for the integrator,",
"range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :],",
"+ 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq) # Ca =",
"given out of the rotating frame. One can, of course, use :mod:`spinsim` to",
"# y = field_sample[1]/precision # z = field_sample[2]/precision # q = field_sample[3]/precision #",
"in the inner product. Returns * **d** (:class:`numpy.complex128`) - The inner product of",
"-(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y)",
"of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the exponentiation",
"0 operator[2, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 1",
"0]*right[0, 0] + left[0, 1]*right[1, 0] result[1, 0] = left[1, 0]*right[0, 0] +",
"= set_to_zero self.matrix_multiply = matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic = matrix_exponential_analytic self.matrix_exponential_lie_trotter =",
"result[1, 1] = ca*(eq*eq*eq*eq) - 1 result[2, 1] = sa*eq*ez*ep result[0, 2] =",
"1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) #",
"of :obj:`numpy.float64` (time_index) The times that `state` was evaluated at. time_evolution : :obj:`numpy.ndarray`",
"0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration,",
"a subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x +",
"cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index",
"in nb.prange(spin.shape[0]): if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index,",
"print(\"\\033[31mspinsim error: numba could not jit get_field function into a device function.\\033[0m\\n\") raise",
"= device.jit_device jit_device_template = device.jit_device_template device_index = device.index dimension = spin_quantum_number.dimension lie_dimension =",
"The integration time step. Measured in s. * **time_step_output** (:obj:`float`) - The sample",
"= left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] result[0, 1] = left[0, 0]*right[0,",
"= field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) # eq = field_sample[3]/(6*precision) # eq",
"product. Returns * **d** (:class:`numpy.complex128`) - The inner product of l and r.",
"2] + left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2",
"and the fourth entry being the amplitude of the quadratic shift (only appearing,",
"experiment is `time_end - time_start`. time_step_integration : :obj:`float` The integration time step. Measured",
"to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See",
"elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max =",
"\\\\end{pmatrix} \\\\end{align*} Then the exponential can be calculated as .. math:: \\\\begin{align*} \\\\exp(A)",
"c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z",
"= field_sample[2]/precision # q = field_sample[3]/precision # cx = math.cos(x) # sx =",
"def spin_calculator(state): \"\"\" Calculates the expected spin projection (Bloch vector) over time for",
"multiply in the inner product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The",
"operator[1, 1] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0]",
":func:`numba.roc.jit()` LLVM compiler to compile the integrator to run on an AMD ROCm",
"= ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end = 4 elif",
"== IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end = 1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method",
"+ 1 # utility_set = spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE:",
"2] if dimension == 2: rotating_wave /= 2 # For every fine step",
"of z. norm2(z) : :obj:`callable` The 2 norm of a complex vector. ..",
"results @staticmethod @nb.njit def get_state(state_init, state, time_evolution): \"\"\" Use the stepwise time evolution",
":obj:`Device` for all options and more details. exponentiation_method : :obj:`ExponentiationMethod` Which method to",
"\"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run on",
"when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying",
"really this number is 64). Raising this value allocates more registers (fast memory)",
"spin system, written in terms of the eigenstates of the spin projection operator",
"system over time. Parameters ---------- sweep_parameter : :obj:`float` The input to the `get_field`",
"since the integrator will on average take smaller steps. .. note :: The",
"each time sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated",
"2] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] =",
"conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) result[2, 1] = conj(operator[1, 2]) result[0,",
":obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. get_time_evolution_raw : :obj:`callable`",
":class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply by. * **result** (:class:`numpy.ndarray`",
"of a complex vector. .. math:: \\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2",
"by calling this method multiple times, each time varying `sweep_parameter`. time_start : :obj:`float`",
"system dynamics. This is not done when this option is set to :obj:`True`",
"math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of",
":class:`numpy.complex128`, (y_index, x_index)) - The operator to take the adjoint of. * **result**",
"left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2] @jit_device def",
"(\"analytic\", 0) \"\"\" Analytic expression of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF`",
"Once :math:`T` is calculated, it is then recursively squared :math:`\\\\tau` times to obtain",
"+ time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0, 2] if dimension ==",
"the contents of one matrix into another. .. math:: (A)_{i, j} = (B)_{i,",
":obj:`str` A text label that can be used for archiving. \"\"\" MAGNUS_CF4 =",
"= 1 sample_index_end = 1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and",
"& 0 \\\\\\\\ 0 & 0 & 0 \\\\\\\\ 0 & 0 &",
":obj:`float` The sample resolution of the output timeseries for the state. Must be",
"Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy",
"for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is",
"0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] result[0, 1]",
"The times that `state` was evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end))",
"lab frame, for each time sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index,",
"+ state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index,",
"a roc device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() state =",
"+ state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\"",
"\"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation to use for matrix exponentiation within the",
"1] = cisz*cx*cy # result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2 # cisz =",
"defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for",
"system in the lab frame, for each time sampled. Units of :math:`\\\\hbar`. This",
"= complex_abs self.norm2 = norm2 self.inner = inner self.set_to = set_to self.set_to_one =",
": :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state of the spin system,",
"2] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0, 0])*operator[0, 0]",
"experiment is to finish at. Measured in s. The duration of the experiment",
"used to sweep over dressing frequencies during the simulations that `spinsim` was designed",
"time_step_output : :obj:`float` The sample resolution of the output timeseries for the state.",
"1] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[0,",
"left[0, 2]*right[2, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0]",
"np.complex128) self.get_state(state_init, state, time_evolution_coarse) results = Results(time, time_evolution_coarse, state, self.spin_calculator) return results @staticmethod",
"1] = sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2, 2]",
"\"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator",
"operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] result[1, 1] = operator[1, 0]*operator[0, 1]",
"be a whole number multiple of `time_step_integration`. Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray`",
"compilable numpy features. \"\"\" CPU = (\"cpu\", 0) \"\"\" Use the :func:`numba.jit()` LLVM",
"jit_host(template, max_registers): def jit_host(func): return cuda.jit(template, debug = False, max_registers = max_registers)(func) return",
":], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index == 0: time_evolution_old =",
"* **time_sample** (:obj:`float`) - the time to sample the field at, in units",
"mathematics definition is used here rather than the physics definition, so the left",
"&= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &=",
"the accuracy of the output since the integrator will on average take smaller",
"the Lie Trotter theorem. \"\"\" class Device(Enum): \"\"\" The target device that the",
"field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq) # Ca = 1 # Sa =",
"field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration",
"value self.index = index if value == \"python\": def jit_host(template, max_registers): def jit_host(func):",
"Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set",
"1 if device_index == 0: temporary = np.empty((3, 3), dtype = np.complex128) elif",
"is chosen. threads_per_block : :obj:`int` The size of each thread block (workgroup), in",
"a frame rotating in the z axis by an amount defined by the",
"@jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0]",
"The results of a an evaluation of the integrator. Attributes ---------- time :",
"value == \"python\": def jit_host(template, max_registers): def jit_host(func): return func return jit_host self.jit_host",
"through bias values, by calling this method multiple times, each time varying `sweep_parameter`.",
"a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int`",
"= device.jit_device device_index = device.index @jit_device def conj(z): return (z.real - 1j*z.imag) @jit_device",
"ie, don't compile the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use the",
"np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128)",
"+ 1j*math.sin(z - q/3) # result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy)",
"value == \"cuda\": def jit_host(template, max_registers): def jit_host(func): return cuda.jit(template, debug = False,",
"evolution operator in parallel. Compiled for chosen device on object constrution. Parameters: *",
"dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension),",
"integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers = 63): \"\"\"",
"+ left[1, 1]*right[1, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1,",
"be used for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free, fourth order",
"3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:],",
"\\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z",
"(and q for spin one) respectively, as described above. * **result** (:class:`numpy.ndarray` of",
"@jit_device def cross(left, right, result): result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0]",
"= cuda.local.array((3, 3), dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block,",
"in parallel time_index = roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration,",
"= jit_host def jit_device(func): return cuda.jit(device = True, inline = True)(func) self.jit_device =",
"= Sa*ep result[0, 1] = Sa/ep result[1, 1] = Ca*ez - 1 if",
"added to the number specified for control, so really this number is 64).",
"0 operator[0, 1] = 0 operator[1, 1] = 1 operator[2, 1] = 0",
"The inner (maths convention dot) product between two complex vectors. .. note:: The",
"execution speed for a specific GPU model. Defaults to 63 (optimal for GTX1070,",
"0 operator[0, 1] = 0 operator[1, 1] = 0 operator[2, 1] = 0",
"rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] =",
"return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cuda\": def jit_host(template,",
"Conjugate of a complex number. .. math:: \\\\begin{align*} (a + ib)^* &= a",
"rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block,",
"Parameters ---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state of",
"2] = (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0,",
"varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state`",
".. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta}",
"(2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2}",
"is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to",
"spin system over time. Parameters ---------- sweep_parameter : :obj:`float` The input to the",
"the device functions (functions compiled for use on the target device) used in",
"rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] = X.imag field_sample[2]",
"operator[1, 1] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0] = left[0,",
"0 operator[1, 1] = 0 operator[2, 1] = 0 operator[0, 2] = 0",
"time for a given time series of a quantum state. Parameters ---------- state",
"result[2, 0] = conj(operator[0, 2]) result[0, 1] = conj(operator[1, 0]) result[1, 1] =",
"(y_index, x_index)) - The values of x, y and z respectively, as described",
"(:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that the experiment is to",
"device_index == 2: # Run calculation for each coarse timestep in parallel time_index",
"= math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2,",
"for all options and more details. exponentiation_method : :obj:`ExponentiationMethod` Which method to use",
"magnetic_quantum_number) The evaluated quantum state of the spin system over time, written in",
"roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index",
"z. norm2(z) : :obj:`callable` The 2 norm of a complex vector. .. math::",
"== IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine,",
"over time. \"\"\" def __init__(self, time, time_evolution, state, spin_calculator): \"\"\" Parameters ---------- time",
"def jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device = True, inline = True)(func) return",
"= temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0,",
"class Device(Enum): \"\"\" The target device that the integrator is being compiled for.",
"== SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2)",
"is always added to the number specified for control, so really this number",
"# precision = 4**hyper_cube_amount # x = field_sample[0]/precision # y = field_sample[1]/precision #",
"1] /= rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index,",
"left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1] result[1, 1]",
"operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1] result[2, 1] = operator[2, 0]*operator[0, 1]",
"on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for",
":])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the stepwise",
"be used. Thus, there will be an optimal value of `max_registers` for each",
"= transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame =",
"Which method to use for matrix exponentiation in the integration algorithm. Defaults to",
"of s. time_step_integration : :obj:`float` The time step used within the integration algorithm.",
"z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y",
"SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device",
"whole GPU, for each specific GPU model. This means that if more registers",
"devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying might be able to increase execution",
"i & 0 & -i \\\\\\\\ 0 & i & 0 \\\\end{pmatrix},\\\\\\\\ J_z",
"The operator to take the adjoint of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"* **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to take the 2",
"into a device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True,",
"**left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply by.",
"calling this method multiple times, each time varying `sweep_parameter`. time_start : :obj:`float` The",
":obj:`Device` for all options and more details. threads_per_block : :obj:`int` The size of",
"1 operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 1",
"use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points",
"define `get_field()` with field functions that use the rotating wave approximation in the",
"0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated",
"result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index == 0: temporary = np.empty((3,",
"math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2: field_sample[2, 3]",
"Run calculation for each coarse timestep in parallel time_index = cuda.grid(1) if time_index",
"whether the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA`",
"&= \\\\exp(-ix J_x - iy J_y - iz J_z - iq J_q)\\\\\\\\ &=",
"large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y -",
"1 # sa = -1j*a/sqrt2 # ez = field_sample[2]/(2*precision) # ez = 1",
"to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when",
"state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\" Calculates",
".. math:: \\\\begin{align*} |a + ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in",
"is an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &=",
"inner product of l and r. set_to(operator, result) : :obj:`callable` Copy the contents",
"time step. Measured in s. time_step_output : :obj:`float` The sample resolution of the",
"Returns ------- results : :obj:`Results` An object containing the results of the simulation.",
"conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[2, 0] = conj(operator[0, 2]) result[0,",
"or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index",
"J_z + q J_q), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0",
"0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1,",
"time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could not jit get_field function into a roc",
"+ 1j*x)*s result[1, 1] = c + 1j*z*s else: result[0, 0] = 1",
"sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq)",
"0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif device_index > 0: if device_index",
"system at the start of the simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index,",
"0] = 0 operator[0, 1] = 0 operator[1, 1] = 0 operator[2, 1]",
"sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0, 2] if dimension == 2: rotating_wave /=",
"threads vs faster running threads, and changing this value could increase performance for",
"option, the user defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA",
"so the left vector is conjugated. Thus the inner product of two orthogonal",
"&= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once",
"sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on object that",
"using the JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates the expected spin projection",
"time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2,",
"of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch vector) over time. \"\"\"",
"@jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration,",
"- The quantum state of the spin system over time, written in terms",
"get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int",
"= False, max_registers = max_registers)(func) return jit_host self.jit_host = jit_host def jit_device(func): return",
"for chosen device on object constrution. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index,",
"!= SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use an analytic exponentiation method outside of spin",
"time to sample the field at, in units of s. * **simulation_index** (:obj:`int`)",
"the spin system over time, written in terms of the eigenstates of the",
"GPU models. \"\"\" jit_device = device.jit_device device_index = device.index @jit_device def conj(z): return",
"**sweep_parameter** (:obj:`float`) - The input to the `get_field` function supplied by the user.",
"elif device_index == 1: # Run calculation for each coarse timestep in parallel",
"\\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin",
"(\"python\", 0) \"\"\" Use pure python interpreted code for the integrator, ie, don't",
"1 sample_index_end = 1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number",
"np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results = Results(time, time_evolution_coarse, state, self.spin_calculator) return",
"integrator to run on a single CPU core. .. note :: To use",
"function for evaluating the time evolution operator in parallel. Compiled for chosen device",
"as nb from numba import cuda from numba import roc import math sqrt2",
"0] result[1, 0] = operator[1, 0] result[2, 0] = operator[2, 0] result[0, 1]",
".. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index): super().__init__() self._value_",
"1] = Sa/ep result[1, 1] = Ca*ez - 1 if device_index == 0:",
"1] = ca*(eq*eq*eq*eq) - 1 result[2, 1] = sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep))",
"2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1]",
"rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 = (1.5 +",
"-((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index",
"result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy) # result[1, 2] = cisz*(-1j*sx",
"\\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i",
"single CPU core. .. note :: To use this device option, the user",
"time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated time evolution operator",
"= 4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end = 4 elif",
"(y_index, x_index)) - The matrix to copy to. set_to_one(operator) : :obj:`callable` Make a",
"functions that use the rotating wave approximation in the rotating frame. integration_method :",
"be modified to increase the execution speed for a specific GPU model. Defaults",
"for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary) #",
"This :obj:`callable` is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is",
"2]*right[2, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] +",
"cuda.jit(template, device = True, inline = True)(func) return jit_device_template self.jit_device_template = jit_device_template elif",
"(matrix) between the current and next timesteps, for each time sampled. See :math:`U(t)`",
"matrix_multiply(temporary, temporary, result) result[0, 0] += 1 result[1, 1] += 1 # @jit_device",
"operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1] result[0, 2] = (2",
"\\\\sqrt{x^2 + y^2 + z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index))",
"- 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding):",
"get_field_jit = jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :],",
"= rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index] # Initialise",
"&= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated, it is then",
"on the Lie Product Formula, .. math:: \\\\exp(A + B) = \\\\lim_{c \\\\to",
"the first three entries being x, y, z spatial directions (to model a",
"conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2]",
"time step. Measured in s. * **time_step_output** (:obj:`float`) - The sample resolution of",
"constrution. Parameters: * **sweep_parameter** (:obj:`float`) - The input to the `get_field` function supplied",
"if dimension == 2: rotating_wave /= 2 # For every fine step for",
"= cuda.local.array((2, 2), dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block,",
"0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2",
"1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:,",
": :obj:`Results` An object containing the results of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration",
"-(cx*sy + 1j*sx*cy)*cisz # result[1, 1] = (cx*cy + 1j*sx*sy)*cisz # if device_index",
"is being put under. It must have three arguments: * **time_sample** (:obj:`float`) -",
"\"\"\" Calculate each expected spin value in parallel. For spin half: .. math::",
"return self.spin raise AttributeError(\"{} has no attribute called {}.\".format(self, attr_name)) class Simulator: \"\"\"",
"get_time_evolution_raw : :obj:`callable` The internal function for evaluating the time evolution operator in",
"elif device == Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] +",
"y_index, x_index) The evaluated time evolution operator between each time step. See :ref:`architecture`",
"out of the rotating frame. One can, of course, use :mod:`spinsim` to integrate",
"(2 + operator[2, 2])*operator[2, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1]",
"jit_device(func): return func self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return func return",
"float64[:], float64[:], float64, float64, complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration,",
"**operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`1`.",
"time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration - time_coarse",
"+ b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The",
"jit_device = device.jit_device jit_device_template = device.jit_device_template device_index = device.index dimension = spin_quantum_number.dimension lie_dimension",
"result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] +",
"under. It must have three arguments: * **time_sample** (:obj:`float`) - the time to",
"def jit_device(func): return nb.njit()(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func)",
"field_sample[0, 2] = 0 if use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter,",
"vector is conjugated. Thus the inner product of two orthogonal vectors is 0.",
"hyper_cube_amount < 0: # hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x",
"and required, in spin one systems). .. note:: This function must be compilable",
"(:class:`numpy.float64`) - The 2 norm of z. inner(left, right) : :obj:`callable` The inner",
"time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0]",
"over time. This is calculated just in time using the JITed :obj:`callable` `spin_calculator`.",
"result[1, 1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if",
"# result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy #",
"the device that the integrator is being compiled for. See :class:`Device` for more",
"1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1] result[1, 1] = operator[1,",
"projection operator in the z direction. Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index,",
"result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0: # hyper_cube_amount",
"of the device functions (functions compiled for use on the target device) used",
"math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2 +",
"of the simulator. Parameters ---------- get_field : :obj:`callable` A python function that describes",
"jit_device_template elif value == \"cuda\": def jit_host(template, max_registers): def jit_host(func): return cuda.jit(template, debug",
"use for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number`",
"np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype =",
"shift (only appearing, and required, in spin one systems). .. note:: This function",
"frame rotating in the z axis by an amount defined by the field",
"compile the integrator to run on all CPU cores, in parallel. .. note",
"sweep over dressing frequencies during the simulations that `spinsim` was designed for. *",
"time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) -",
"- The input to the `get_field` function supplied by the user. Modifies the",
"(time_index) The times that `state` was evaluated at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128`",
"= jit_device_template elif value == \"roc\": def jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func)",
"the z direction. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector)",
"math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64,",
"operator[2, 2])*operator[2, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0,",
"otherwise. See :obj:`Device` for all options and more details. threads_per_block : :obj:`int` The",
"use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block",
"0] = cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx - cy -",
"Compiled for chosen device on object constrution. Parameters: * **sweep_parameter** (:obj:`float`) - The",
"time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0,",
"spin = self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin raise AttributeError(\"{} has no attribute",
"See :obj:`Device` for all options and more details. exponentiation_method : :obj:`ExponentiationMethod` Which method",
"1] result[2, 1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2,",
"operator[1, 1])*operator[1, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0,",
"start and finishes. In units of s. time_step_integration : :obj:`float` The time step",
"object. Attributes ---------- conj(z) : :obj:`callable` Conjugate of a complex number. .. math::",
"ez = field_sample[2]/(2*precision) # ez = 1 + 1j*ez # eq = field_sample[3]/(6*precision)",
".. math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot",
"(index)) - The vector to take the 2 norm of. Returns * **nz**",
"field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension >",
"to be run. For example, it is used to sweep over dressing frequencies",
":], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration,",
":obj:`str` A text label that can be used for archiving. index : :obj:`int`",
"of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`0`. matrix_multiply(left, right,",
".. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z",
"0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[2, 0] = conj(operator[0,",
"projection operator in the z direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)",
"IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max =",
"Parameters ---------- sweep_parameter : :obj:`float` The input to the `get_field` function supplied by",
"field from the end of the time step. The equivalent of the trapezoidal",
"(A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: *",
"CPU cores, in parallel. .. note :: To use this device option, the",
"(maths convention dot) product between two complex vectors. .. note:: The mathematics definition",
"each thread, meaning slower memory must be used. Thus, there will be an",
":class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the result of",
"utilities = Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj complex_abs = utilities.complex_abs norm2 =",
"field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0]",
"field_sample[0, :]) time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1] =",
"time step. See :ref:`architecture` for some information. \"\"\" for time_index in range(state.shape[0]): #",
"0: # temporary = np.empty((3, 3), dtype = np.complex128) # elif device_index ==",
"* **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated time evolution",
"enums cannot be interpreted. \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value",
"math.sqrt(x**2 + y**2 + z**2) if r > 0: x /= r y",
"elif device_index == 2: # Run calculation for each coarse timestep in parallel",
"= sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) - 1 result[2, 1] = sa*eq*ez*ep result[0,",
"number of registers allocated per thread when using :obj:`Device.CUDA` as the target device,",
"sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The",
"The matrix to copy to. set_to_one(operator) : :obj:`callable` Make a matrix the multiplicative",
"be able to increase execution time for different GPU models. max_registers : :obj:`int`",
"r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to",
"for compilable python features, and `Supported Numpy features`_ for compilable numpy features. \"\"\"",
"could not jit get_field function into a roc device function.\\033[0m\\n\") raise time_evolution_coarse =",
"i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X +",
"# result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx -",
"on object constrution. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The",
"attr_name): if attr_name == \"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin",
"np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index ==",
"use with spin half systems. Will not work with spin one systems. Assumes",
"* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to left multiply in",
"= np.complex128) elif device_index == 1: temporary = cuda.local.array((3, 3), dtype = np.complex128)",
"64). Raising this value allocates more registers (fast memory) to each thread, out",
"sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers = max_registers) def get_spin(state, spin): \"\"\"",
"the target device, and can be modified to increase the execution speed for",
"0 & 0 & -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 &",
"is 64). Raising this value allocates more registers (fast memory) to each thread,",
"# elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype =",
"utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter",
"exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if",
"- The values of x, y and z (and q for spin one)",
"of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from. * **result** (:class:`numpy.ndarray`",
"quantum state timeseries of the 3 level atom. Parameters ---------- state_init : :class:`numpy.ndarray`",
"time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2, 1] =",
"max_registers = 63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable`",
"field_sample[1] = X.imag field_sample[2] = field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:],",
"get_field function into a device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame",
"The times that `state` was evaluated at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index,",
"targeted for integration. That is, whether the integrator is compiled for a CPU",
"3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0])",
"2]*right[2, 1] result[0, 2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] +",
"power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0] += 1 result[1, 1]",
"balancing more threads vs faster running threads, and changing this value could increase",
"if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] =",
"GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on is",
"= 28, threads_per_block = 64, max_registers = 63): \"\"\" Compiles the integrator and",
"---------- sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse grained",
"= math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for the spin quantum",
"result[0, 0] = operator[0, 0] result[1, 0] = operator[1, 0] result[0, 1] =",
"time. \"\"\" def __init__(self, time, time_evolution, state, spin_calculator): \"\"\" Parameters ---------- time :",
"features. \"\"\" CUDA = (\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler to",
"of the spin system, written in terms of the eigenstates of the spin",
"Integration method from AtomicPy. Makes two Euler integration steps, one sampling the field",
"- is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ}",
"float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample,",
"ONE = (1, 3, \"one\") \"\"\" For three level systems. \"\"\" class IntegrationMethod(Enum):",
"jit_device_template self.jit_device_template = jit_device_template PYTHON = (\"python\", 0) \"\"\" Use pure python interpreted",
"+ operator[1, 1])*operator[1, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1] +",
"- 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif device ==",
"is then recursively squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray`",
"0] + operator[0, 2]*operator[2, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2",
"2].imag**2 return def spin_calculator(state): \"\"\" Calculates the expected spin projection (Bloch vector) over",
"of :class:`numpy.float64` (time_index, spatial_index) The expected value for hyperfine spin of the spin",
"time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables if device_index == 0: time_evolution_fine = np.empty((dimension,",
"* **nz** (:class:`numpy.float64`) - The 2 norm of z. inner(left, right) : :obj:`callable`",
"= True)(func) return jit_device_template self.jit_device_template = jit_device_template PYTHON = (\"python\", 0) \"\"\" Use",
"= time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample +=",
"\"\"\" \"\"\" # from . import utilities from enum import Enum import numpy",
"wave approximation: just define `get_field()` with field functions that use the rotating wave",
"combination of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x + y",
"rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] =",
"more threads vs faster running threads, and changing this value could increase performance",
"= 4**hyper_cube_amount # x = field_sample[0]/precision # y = field_sample[1]/precision # z =",
"= ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4:",
"that `state` was evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The",
"increases the accuracy of the output since the integrator will on average take",
"execution time for different GPU models. max_registers : :obj:`int` The maximum number of",
"-1j*a/sqrt2 # ez = field_sample[2]/(2*precision) # ez = 1 + 1j*ez # eq",
"numpy features. \"\"\" CPU = (\"cpu\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler",
"- The operator to take the adjoint of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"np.empty((dimension, dimension), dtype = np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding",
"(y_index, x_index)) - The operator to take the adjoint of. * **result** (:class:`numpy.ndarray`",
"time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0,",
"|\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index,",
"= math.cos(y) # sy = math.sin(y) # cisz = math.cos(z + q/3) -",
"the rotating frame. One can, of course, use :mod:`spinsim` to integrate states in",
"None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers = 63): \"\"\" Compiles the",
"Returns * **d** (:class:`numpy.complex128`) - The inner product of l and r. set_to(operator,",
"enum import Enum import numpy as np import numba as nb from numba",
"# matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) self.conj = conj self.complex_abs =",
"spin = spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator def evaluate(self,",
"for many experiments, without the need for slow recompilation. For example, if the",
"the multiplicative identity, ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\",
"features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index):",
"2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if dimension > 2: field_sample[2, 3]",
"of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to. set_to_one(operator) : :obj:`callable`",
"operator[0, 1] = 0 operator[1, 1] = 1 operator[2, 1] = 0 operator[0,",
"0] result[1, 0] = operator[1, 0] result[0, 1] = operator[0, 1] result[1, 1]",
"+ 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] = sa*eq*ep/ez result[2,",
"# result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz # result[1, 1] = (cx*cy +",
"time_index = cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse,",
"inactive, and the GPU is said to have less occupancy. Lowering the value",
"result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy) # if device_index == 0:",
"to :obj:`True` - no such approximations are made, and the output state in",
"the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not",
"in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated",
"model of GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and",
"of a complex number. .. math:: \\\\begin{align*} (a + ib)^* &= a -",
"result): result[0, 0] = operator[0, 0] result[1, 0] = operator[1, 0] result[0, 1]",
"- state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index,",
"z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ}",
"cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy) # cisz =",
":], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables",
"= state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif",
"spin quantum number of a system. Parameters ---------- value : :obj:`float` The numerical",
"on construction of the object. Attributes ---------- conj(z) : :obj:`callable` Conjugate of a",
"for some information. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector)",
"0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator, result): result[0,",
"might be able to increase execution time for different GPU models. max_registers :",
"= 0 operator[1, 1] = 0 operator[2, 1] = 0 operator[0, 2] =",
"from numba import roc import math sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) class",
"self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic = matrix_exponential_analytic self.matrix_exponential_lie_trotter",
"= cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy # result[2, 1] =",
"temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :,",
"rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6 w1 =",
"the output timeseries for the state. Must be a whole number multiple of",
"calculate `spin` the first time it is referenced by the user. Parameters: *",
"The conjugate of z. complex_abs(z) : :obj:`callable` The absolute value of a complex",
"one) respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"Time evolution operator (matrix) between the current and next timesteps, for each time",
":obj:`int` The size of each thread block (workgroup), in terms of the number",
"result[1, 0] = operator[1, 0] result[0, 1] = operator[0, 1] result[1, 1] =",
"**time_step_output** (:obj:`float`) - The sample resolution of the output timeseries for the state.",
"(time_index, bra_state_index, ket_state_index) The evaluated time evolution operator between each time step. See",
"matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1)",
"adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[0,",
"else: state[time_index, x_index] += state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon =",
"\"\"\" if device.index == 0: spin = np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif",
"vector) over time for a given time series of a quantum state. Used",
"class Simulator: \"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether",
"The time step used within the integration algorithm. In units of s. time_step_output",
"= time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample,",
"lie_dimension = dimension + 1 # utility_set = spin_quantum_number.utility_set if not exponentiation_method: if",
"time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64)",
"e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math::",
"matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a matrix exponential based on the Lie Product",
"time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index, 0, 2] /=",
"r c = math.cos(r/2) s = math.sin(r/2) result[0, 0] = c - 1j*z*s",
"here rather than the physics definition, so the left vector is conjugated. Thus",
"time, time_evolution, state, spin_calculator): \"\"\" Parameters ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)",
"\\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot r &=",
"J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix}",
"exponentiation_method = None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block",
"the z direction. Returns ------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The",
"# temporary = np.empty((3, 3), dtype = np.complex128) # elif device_index == 1:",
"describing which method is used during the integration. Parameters ---------- value : :obj:`str`",
"1] + left[0, 2]*right[2, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1,",
"two Euler integration steps, one sampling the field from the start of the",
"dtype = np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 3,",
"< 0: # hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x =",
"= None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except:",
"parameter that can be swept over when multiple simulations need to be run.",
"\\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\",
"& -i & 0 \\\\\\\\ i & 0 & -i \\\\\\\\ 0 &",
"time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine",
"nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 1: #",
"integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers",
"+ z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values",
"the integrator can be used for many experiments, without the need for slow",
"1 result[2, 2] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): #",
"jit_device_template PYTHON = (\"python\", 0) \"\"\" Use pure python interpreted code for the",
"== 0: # temporary = np.empty((2, 2), dtype = np.complex128) # elif device_index",
"lie_dimension), dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128) elif device_index ==",
"recompilation. For example, if the `sweep_parameter` is used to define the bias field",
"time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables if device_index == 0: time_evolution_fine",
"operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1]",
"float64, float64, complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse):",
"integrator. Attributes ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state`",
"0] = 0 result[0, 1] = 0 result[1, 1] = 1 @jit_device def",
"object returned from :func:`Simulator.evaluate()`, and is executed there just in time if the",
"only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation using the Lie Trotter theorem.",
"2] = conj(operator[2, 1]) result[2, 2] = conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result,",
"explanation. \"\"\" if not device: if cuda.is_available(): device = Device.CUDA else: device =",
"Parameters ---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin wavefunction) of the",
"(:class:`numpy.complex128`) - The complex number to take the conjugate of. Returns * **cz**",
"If set to :obj:`True`, the integrator moves into a frame rotating in the",
"use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba could not jit",
"self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block try:",
"into a frame rotating in the z axis by an amount defined by",
"time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0, 2]",
"in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary,",
"value of z. norm2(z) : :obj:`callable` The 2 norm of a complex vector.",
"\\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction)",
"* **cz** (:class:`numpy.complex128`) - The conjugate of z. complex_abs(z) : :obj:`callable` The absolute",
"j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0] result[0, 1] = (2",
"time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index] # Initialise time evolution operator",
"max_registers) except: print(\"\\033[31mspinsim error: numba could not jit get_field function into a device",
"\\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters",
"trotter_cutoff : :obj:`int` The number of squares made by the matrix exponentiator, if",
"vector to left multiply in the inner product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"+ 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = time_fine +",
"s. Determines the sample rate of the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse :",
"1j*x)*s result[0, 1] = -(y + 1j*x)*s result[1, 1] = c + 1j*z*s",
":obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch vector) over time. \"\"\" if",
"at, in units of s. * **simulation_index** (:obj:`int`) - a parameter that can",
"> 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine,",
"float64[:, :])\", max_registers = max_registers) def get_spin(state, spin): \"\"\" Calculate each expected spin",
": :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch vector) over",
"parallel time_index = roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points,",
"= math.sin(y) # cisz = math.cos(z) + 1j*math.sin(z) # result[0, 0] = (cx*cy",
"time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables if device_index == 0:",
"- |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) =",
"r. set_to(operator, result) : :obj:`callable` Copy the contents of one matrix into another.",
"0: time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype",
"ExponentiationMethod(Enum): \"\"\" The implementation to use for matrix exponentiation within the integrator. Parameters",
"that `state` was evaluated at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)",
"= spin_quantum_number self.device = device self.get_time_evolution_raw = None self.get_spin_raw = None try: self.compile_time_evolver(get_field,",
"= dimension + 1 # utility_set = spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number",
"@jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points,",
"operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2] result[1, 2]",
":class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`1`. set_to_zero(operator) : :obj:`callable`",
"more registers (fast memory) to each thread, out of a maximum number for",
"= field_sample[2]/(2*precision) # ez = 1 + 1j*ez # eq = field_sample[3]/(6*precision) #",
"operator[0, 1] = 0 operator[1, 1] = 0 operator[2, 1] = 0 operator[0,",
"@jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2",
":obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there just in time if",
"0 operator[1, 1] = 1 operator[2, 1] = 0 operator[0, 2] = 0",
"for compilable numpy features. \"\"\" CPU = (\"cpu\", 0) \"\"\" Use the :func:`numba.jit()`",
"user. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state",
"time_fine += time_step_integration if use_rotating_frame: # Take out of rotating frame rotating_wave_winding[0] =",
"np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter,",
"-i \\\\\\\\ i & 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\",
"0 operator[0, 1] = 0 operator[1, 1] = 0 @jit_device def matrix_multiply(left, right,",
"+ \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}}",
"1] = sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) - 1 result[2, 1] = sa*eq*ez*ep",
"to a Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:,",
"def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator, result):",
"operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1,",
"if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int` The size of each thread block",
"dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype",
"-i \\\\\\\\ 0 & i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 &",
"jit_device_template self.jit_device_template = jit_device_template elif value == \"roc\": def jit_host(template, max_registers): def jit_host(func):",
"result[0, 2] = operator[0, 2] result[1, 2] = operator[1, 2] result[2, 2] =",
"then recursively squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of",
":class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated time evolution operator between each",
"roc.jit(device = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return roc.jit(template, device",
"multiply in the inner product. Returns * **d** (:class:`numpy.complex128`) - The inner product",
"is calculated, it is then recursively squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters:",
"device_index == 1: time_index = cuda.grid(1) elif device_index == 1: time_index = roc.get_global_id(1)",
"2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2] result[1, 2] = operator[1,",
"numpy as np import numba as nb from numba import cuda from numba",
"function that describes the field that the spin system is being put under.",
"result of the product. adjoint(operator) : :obj:`callable` Takes the hermitian adjoint of a",
".. warning :: Work in progress, not currently functional! \"\"\" class Results: \"\"\"",
"quadratic shift (only appearing, and required, in spin one systems). .. note:: This",
"time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) +",
"to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init,",
"time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the stepwise time evolution opperator. Parameters ----------",
"trotter_cutoff = 32, threads_per_block = 64, max_registers = 63): \"\"\" .. _Achieved Occupancy:",
"spin system is being put under. It must have three arguments: * **time_sample**",
"must be used. Thus, there will be an optimal value of `max_registers` for",
"operator in parallel. Compiled for chosen device on object constrution. Parameters: * **sweep_parameter**",
"Determines the sample rate of the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray`",
"Calculates a matrix exponential based on the Lie Product Formula, .. math:: \\\\exp(A",
"an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*}",
"quantum state. Used to calculate `spin` the first time it is referenced by",
"= utilities.complex_abs norm2 = utilities.norm2 inner = utilities.inner set_to = utilities.set_to set_to_one =",
":class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse grained list of time samples that the",
"- time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0,",
"\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & 0 & 0 \\\\\\\\",
"systems. Will not work with spin one systems. Assumes the exponent is an",
"device = True, inline = True)(func) return jit_device_template self.jit_device_template = jit_device_template elif value",
"y = field_sample[1]/precision # z = field_sample[2]/precision # q = field_sample[3]/precision # cx",
"= conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[0, 1] = conj(operator[1, 0])",
"= roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block",
"= c + 1j*z*s else: result[0, 0] = 1 result[1, 0] = 0",
"raise AttributeError(\"{} has no attribute called {}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes ----------",
"max_registers): def jit_host(func): return cuda.jit(template, debug = False, max_registers = max_registers)(func) return jit_host",
"of the spin system in the lab frame, for each time sampled. time_evolution",
"spin.shape[0]: if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1]",
"float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit = jit_device(get_field)",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result of the",
"in nb.prange(state.shape[1]): state[time_index, x_index] = 0 if time_index > 0: for z_index in",
"complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine +",
"to select which device will be targeted for integration. That is, whether the",
"optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves into a",
"def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration",
"def jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return roc.jit(device",
"0] = operator[1, 0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1,",
"the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_",
"for more details. use_rotating_frame : :obj:`bool` Whether or not to use the rotating",
"& -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*}",
".. math:: \\\\begin{align*} A &= -i(x J_x + y J_y + z J_z",
"given time series of a quantum state. This :obj:`callable` is passed to the",
"= 0 operator[0, 1] = 0 operator[1, 1] = 1 @jit_device def set_to_zero(operator):",
"result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 +",
"append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index == 0: time_evolution_old = np.empty((dimension, dimension), dtype =",
"exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method",
"= (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] = sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0,",
"= math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint",
"x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index] else: state[time_index, x_index]",
"0]*right[0, 0] + left[1, 1]*right[1, 0] result[0, 1] = left[0, 0]*right[0, 1] +",
"return nb.njit(template, parallel = True)(func) return jit_host self.jit_host = jit_host def jit_device(func): return",
"# result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2 # cisz = math.cos(z - q/3)",
"= label HALF = (1/2, 2, \"half\") \"\"\" For two level systems. \"\"\"",
"ca*(eq*eq*eq*eq) - 1 result[2, 1] = sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2]",
"integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points =",
"= True, inline = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return",
"= time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max =",
"3), dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((3, 3), dtype",
"= jit_device def jit_device_template(template): def jit_device_template(func): return func return jit_device_template self.jit_device_template = jit_device_template",
"an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray`",
"2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2",
"time step used within the integration algorithm. In units of s. time_step_output :",
"into a roc device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() state",
"ket_state_index) Time evolution operator (matrix) between the current and next timesteps, for each",
"Raising this value allocates more registers (fast memory) to each thread, out of",
"integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details. trotter_cutoff : :obj:`int` The",
"not work with spin one systems. Assumes the exponent is an imaginary linear",
"is used here rather than the physics definition, so the left vector is",
"concurrently, at the expense of fewer resgiters being avaliable to each thread, meaning",
"with the use of a rotating wave approximation, a technique used to get",
"(spatial_index)) the returned value of the field. This is a four dimensional vector,",
"temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result,",
"field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] =",
"of the integrator. Attributes ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times",
"time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64,",
"parallel. For spin half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\",
"\\\\end{pmatrix} \\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state",
"\"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator",
"of the spin projection operator in the z direction. Returns ------- spin :",
"elif value == \"cuda\": def jit_host(template, max_registers): def jit_host(func): return cuda.jit(template, debug =",
"2] = cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx + cy +",
"> 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0]",
"series of a quantum state. Parameters ---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index,",
"- ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The",
"field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame =",
"# hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0: # hyper_cube_amount = 0",
"device.index == 0: spin = np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif device ==",
"be written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a matrix exponential based on",
":obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state of the spin system, written in terms",
"def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the stepwise time evolution",
"time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could not jit get_field function into a",
"the need for slow recompilation. For example, if the `sweep_parameter` is used to",
"spin projection (Bloch vector) over time for a given time series of a",
"returns the quantum state of the spin system over time. Parameters ---------- sweep_parameter",
"right, result): result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] +",
"the integrator will on average take smaller steps. .. note :: The use",
"ep = 1 a = a/precision Ca = math.cos(a/2) Sa = math.sin(a/2) ca",
"contents of one matrix into another. .. math:: (A)_{i, j} = (B)_{i, j}",
"between each time step. See :ref:`architecture` for some information. \"\"\" for time_index in",
"= (\"python\", 0) \"\"\" Use pure python interpreted code for the integrator, ie,",
"device_index == 0: # temporary = np.empty((3, 3), dtype = np.complex128) # elif",
"def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables if device_index",
"result[0, 1] = -(y + 1j*x)*s result[1, 1] = c + 1j*z*s else:",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to right multiply in the inner",
":obj:`callable` The internal function for evaluating the time evolution operator in parallel. Compiled",
"0 operator[1, 2] = 0 operator[2, 2] = 0 @jit_device def matrix_multiply(left, right,",
"J_y + z J_z + q J_q), \\\\end{align*} with .. math:: \\\\begin{align*} J_x",
"method multiple times, each time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index))",
"of `time_step_integration`. Measured in s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial",
"a :math:`\\\\mathfrak{su}(2)` matrix exponential based on its analytic form. .. warning:: Only available",
"dtype = np.complex128) elif device_index == 1: time_evolution_old = cuda.local.array((dimension, dimension), dtype =",
"def set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0] = operator[1, 0]",
"exponential based on its analytic form. .. warning:: Only available for use with",
"= 1 a = a/precision Ca = math.cos(a/2) Sa = -1j*math.sin(a/2) ez =",
"time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers = max_registers)",
"i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}}",
"- iy J_y - iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x)",
"q/3) + 1j*math.sin(z - q/3) # result[0, 2] = 0.5*cisz*(cx - cy +",
"to run on all CPU cores, in parallel. .. note :: To use",
"and the GPU is said to have less occupancy. Lowering the value increases",
"raise def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method",
"s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated, it is then recursively squared",
"1] result[1, 1] = operator[1, 1] result[2, 1] = operator[2, 1] result[0, 2]",
":obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int` The size of each thread block (workgroup),",
"put under. It must have three arguments: * **time_sample** (:obj:`float`) - the time",
"max_registers = max_registers) def get_spin(state, spin): \"\"\" Calculate each expected spin value in",
"in the integrator. These device functions are compiled for the chosen target device",
"np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index == 0: time = np.empty(time_index_max,",
"= math.cos(eq) + 1j*math.sin(eq) result[0, 0] = Ca/ez - 1 result[1, 0] =",
"self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could not",
"of the spin projection operator in the z direction. Returns: * **spin** (:obj:`numpy.ndarray`",
"matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\") def",
"= math.cos(a/2) Sa = math.sin(a/2) ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez =",
"details. exponentiation_method : :obj:`ExponentiationMethod` Which method to use for matrix exponentiation in the",
":class:`numpy.float64` (spatial_index)) the returned value of the field. This is a four dimensional",
"\\\\\\\\ 0 & -2 & 0 \\\\\\\\ 0 & 0 & 1 \\\\end{pmatrix}",
"if the system it is being run on is Nvidia Cuda compatible, and",
"class IntegrationMethod(Enum): \"\"\" Options for describing which method is used during the integration.",
"must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable python features.",
"result[1, 2] = operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] +",
"def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1])",
"The option to select which device will be targeted for integration. That is,",
"1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) result[2, 1] = conj(operator[1,",
"To use this device option, the user defined field function must be :func:`numba.cuda.jit()`",
"+ operator[0, 1]*operator[1, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2 +",
"if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use an",
"result[0, 1] = 0 result[1, 1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):",
"result[0, 0] = operator[0, 0] result[1, 0] = operator[1, 0] result[2, 0] =",
"2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index == 0: temporary = np.empty((3, 3),",
"time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index ==",
"concurrently than it has Cuda cores, meaning some cores are inactive, and the",
"whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE`",
"= (\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator",
"sample resolution of the output timeseries for the state. Must be a whole",
"+ cy + 1j*sx*sy) # if device_index == 0: # temporary = np.empty((3,",
"bias values, by calling this method multiple times, each time varying `sweep_parameter`. *",
"Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x,",
":ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The expected value for hyperfine",
"is `time_end - time_start`. time_step_integration : :obj:`float` The integration time step. Measured in",
"2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)",
"SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use an analytic exponentiation method outside of spin half.",
"of s. * **simulation_index** (:obj:`int`) - a parameter that can be swept over",
"* previous state for x_index in nb.prange(state.shape[1]): state[time_index, x_index] = 0 if time_index",
"value == \"cpu\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template, parallel = True)(func)",
"integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults",
"trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount = 0 precision =",
"GPU models. max_registers : :obj:`int` The maximum number of registers allocated per thread",
"The option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`,",
"1])*operator[1, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1,",
"- The expected spin projection (Bloch vector) over time. \"\"\" def __init__(self, time,",
"integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the",
"= field_sample[0] y = field_sample[1] z = field_sample[2] r = math.sqrt(x**2 + y**2",
"2] + (2 + operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator, result): result[0, 0]",
"the rotating frame, using the rating wave approximation: just define `get_field()` with field",
"J_z), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\",
"number multiple of `time_step_integration`. Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index,",
"a given time series of a quantum state. Parameters ---------- state : :obj:`numpy.ndarray`",
"time offset that the experiment is to start at, and the time that",
"ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i",
"& 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i & 0",
"operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator, result):",
"np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device == Device.CUDA: time =",
"be swept over when multiple simulations need to be run. For example, it",
"= math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2",
"w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1,",
"state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif device_index",
"for the spin quantum number of a system. Parameters ---------- value : :obj:`float`",
"dimension == 2: rotating_wave /= 2 # For every fine step for time_fine_index",
"field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def",
"For example, it is used to sweep over dressing frequencies during the simulations",
"use this device option, the user defined field function must be :func:`numba.jit()` compilable.",
"def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option",
"(-i s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})}",
"state_init = np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index == 0:",
"result[0, 0] += 1 result[1, 1] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample,",
"CPU = (\"cpu\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the",
"experiment is to start and finishes. In units of s. time_step_integration : :obj:`float`",
"direction. Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin",
"\"\"\" if device_index == 0: for time_index in nb.prange(spin.shape[0]): if dimension == 2:",
"device_index == 2: temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) temporary =",
"is used to sweep over dressing frequencies during the simulations that `spinsim` was",
"operator between each time step. See :ref:`architecture` for some information. \"\"\" for time_index",
"1 result[1, 0] = sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep)",
"X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta}",
"is, whether the integrator is compiled for a CPU or GPU. Defaults to",
"in the z direction. Returns ------- results : :obj:`Results` An object containing the",
"transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6 w1 = (1.5 -",
"self._value_ = value self.dimension = dimension self.label = label HALF = (1/2, 2,",
"quantum state. This :obj:`callable` is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`,",
"that the integrator is being compiled for. See :class:`Device` for more information and",
"= 0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"hyper_cube_amount < 0: hyper_cube_amount = 0 precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] +",
"\"\"\" if device_index == 0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration,",
"rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1]",
"for testing. Note that one extra register per thread is always added to",
"3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding",
"the user. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum",
"time sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated time",
"(:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z",
"the field from the start of the time step, one sampling the field",
"/= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index, 0,",
"roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] +",
"compilable for the device that the integrator is being compiled for. See :class:`Device`",
"device self.get_time_evolution_raw = None self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method,",
"information and links. spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator",
"operator[1, 2]*operator[2, 2] result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2]",
"compilable numpy features. \"\"\" CUDA = (\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM",
"rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index] # Initialise time",
"def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index == 0: time_evolution_old = np.empty((dimension, dimension), dtype",
"nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu\": def jit_host(template, max_registers):",
"def jit_host(template, max_registers): def jit_host(func): return cuda.jit(template, debug = False, max_registers = max_registers)(func)",
"evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension == 3:",
"0] + state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real",
"at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that the",
"list of time samples that the time evolution operator is found for. In",
"based on its analytic form. .. warning:: Only available for use with spin",
"value : :obj:`float` The numerical value of the spin quantum number. dimension :",
"sample_index_end = 4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end = 4",
"integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for",
"A coarse grained list of time samples that the time evolution operator is",
"-(y + 1j*x)*s result[1, 1] = c + 1j*z*s else: result[0, 0] =",
"function into a roc device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host()",
"= 0 operator[1, 1] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0]",
"math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0,",
"0 & -i \\\\\\\\ i & 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 &",
"operator[1, 2] result[2, 2] = operator[2, 2] @jit_device def set_to_one(operator): operator[0, 0] =",
"= exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to",
"complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index == 0: time_evolution_old = np.empty((dimension,",
"max_registers): def jit_host(func): return func return jit_host self.jit_host = jit_host def jit_device(func): return",
"numba.roc could not jit get_field function into a roc device function.\\033[0m\\n\") raise time_evolution_coarse",
"more details. threads_per_block : :obj:`int` The size of each thread block (workgroup), in",
"elif device_index == 1: # temporary = cuda.local.array((2, 2), dtype = np.complex128) #",
"# eq = 1 + 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1,",
":class:`numpy.float64` (time_index) A coarse grained list of time samples that the time evolution",
"// self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error:",
"operator[0, 1]*operator[1, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1,",
"field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :]",
"c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X -",
"for each coarse timestep in parallel time_index = cuda.grid(1) if time_index < time_coarse.size:",
"spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index,",
"= device.index @jit_device def conj(z): return (z.real - 1j*z.imag) @jit_device def complex_abs(z): return",
"4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end = 1 exponentiation_method_index =",
"2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0]",
"float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine",
"the experiment is to start at, and the time that the experiment is",
"complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0,",
"0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0 \\\\\\\\ i &",
"Modifying might be able to increase execution time for different GPU models. max_registers",
"spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index,",
"& -2 & 0 \\\\\\\\ 0 & 0 & 1 \\\\end{pmatrix} \\\\end{align*} Then",
"- ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2 +",
"field_sample[2] = field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def",
"device_index > 0: if device_index == 1: time_index = cuda.grid(1) elif device_index ==",
"more details. exponentiation_method : :obj:`ExponentiationMethod` Which method to use for matrix exponentiation in",
"when multiple simulations need to be run. For example, it is used to",
"= math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on",
"(:obj:`float`) - the time to sample the field at, in units of s.",
":math:`1`. set_to_zero(operator) : :obj:`callable` Make a matrix the additive identity, ie, :math:`0`. ..",
"spin projection operator in the z direction. Returns ------- results : :obj:`Results` An",
"The input to the `get_field` function supplied by the user. Modifies the field",
"self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device == Device.CUDA: time = cuda.device_array(time_index_max,",
"np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin)",
"= math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1,",
":math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The expected value",
"Parameters: * **z** (:class:`numpy.complex128`) - The complex number to take the absolute value",
"math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &=",
"(\"cpu\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the integrator to",
"roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group =",
"x_index)) - The matrix to copy from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"---------- conj(z) : :obj:`callable` Conjugate of a complex number. .. math:: \\\\begin{align*} (a",
"space the states with this spin belong to. label : :obj:`str` A text",
"2] = 0 operator[2, 2] = 0 @jit_device def matrix_multiply(left, right, result): result[0,",
"analytic form. .. warning:: Only available for use with spin half systems. Will",
":], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample =",
"= 0 precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a >",
"= conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result): x",
"(c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X",
"chosen. threads_per_block : :obj:`int` The size of each thread block (workgroup), in terms",
"step. See :ref:`architecture` for some information. spin_calculator : :obj:`callable` Calculates the expected spin",
"inner product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to right",
"directions (to model a magnetic field, for example), and the fourth entry being",
"+ (2 + operator[1, 1])*operator[1, 0] result[0, 1] = (2 + operator[0, 0])*operator[0,",
"result[1, 1] = (cx*cy + 1j*sx*sy)*cisz # if device_index == 0: # temporary",
"field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] =",
":obj:`float` The integration time step. Measured in s. time_step_output : :obj:`float` The sample",
"conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result): x =",
"fewer resgiters being avaliable to each thread, meaning slower memory must be used.",
"norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2)",
"\\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X +",
"float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample",
"= roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0]",
"Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to left multiply",
"(s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2}",
"&= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters:",
"- is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with",
"simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of the",
"rotating wave approximation in the rotating frame. integration_method : :obj:`IntegrationMethod` Which integration method",
"&= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\",
"the spin system in the lab frame, for each time sampled. Units of",
"**time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state` was evaluated at.",
"of the output timeseries for the state. Must be a whole number multiple",
"eigenstates of the spin projection operator in the z direction. Returns ------- spin",
"each time varying `sweep_parameter`. time_start : :obj:`float` The time offset that the experiment",
"- The complex number to take the conjugate of. Returns * **cz** (:class:`numpy.complex128`)",
"1]) result[2, 1] = conj(operator[1, 2]) result[0, 2] = conj(operator[2, 0]) result[1, 2]",
"3), dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 3, 3),",
"the field that the spin system is being put under. It must have",
"= field_sample[2]/(2*precision) # cx = math.cos(x) # sx = math.sin(x) # cy =",
"0] = operator[0, 0] result[1, 0] = operator[1, 0] result[2, 0] = operator[2,",
"define the bias field strength in `get_field`, then one can run many simulations,",
"operator (matrix) between the current and next timesteps, for each time sampled. See",
"left[1]*right[0]) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def",
"to start at. Measured in s. time_end : :obj:`float` The time that the",
"field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1),",
"integration algorithm. In units of s. time_step_output : :obj:`float` The time difference between",
"time series of a quantum state. Parameters ---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128`",
"spin half systems:** Assumes the exponent is an imaginary linear combination of a",
"1 operator[2, 1] = 0 operator[0, 2] = 0 operator[1, 2] = 0",
"set_to self.set_to_one = set_to_one self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply self.adjoint = adjoint",
":obj:`int` A reference number, used when compiling the integrator, where higher level objects",
"= (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin =",
"the value increases GPU occupancy, meaning more threads run concurrently, at the expense",
":], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64,",
"Whether or not to use the rotating frame optimisation. Defaults to :obj:`True`. If",
"float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])",
"0] = (cx*sy -1j*sx*cy)/cisz # result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz # result[1,",
"= 1 # sa = -1j*a/sqrt2 # ez = field_sample[2]/(2*precision) # ez =",
"more threads run concurrently, at the expense of fewer resgiters being avaliable to",
"a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(spin.shape[0]):",
"+= 1 result[1, 1] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):",
"# sa = -1j*a/sqrt2 # ez = field_sample[2]/(2*precision) # ez = 1 +",
"threads (workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA`",
"use :mod:`spinsim` to integrate states in the rotating frame, using the rating wave",
"def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2)",
"the z direction. This removes the (possibly large) z component of the field,",
"(Bloch vector) over time. \"\"\" def __init__(self, time, time_evolution, state, spin_calculator): \"\"\" Parameters",
"math.sin(a/2) ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez = math.cos(ez)",
"cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy # result[2, 1] = cisz*(sy",
"GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and changing this",
": :obj:`str` A text label that can be used for archiving. \"\"\" def",
"to run on an Nvidia cuda compatible GPU, in parallel. .. note ::",
":math:`0`. matrix_multiply(left, right, result) : :obj:`callable` Multiply matrices left and right together, to",
"or not to use the rotating frame optimisation. Defaults to :obj:`True`. If set",
"i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X -",
"of all of the device functions (functions compiled for use on the target",
"of the field, which increases the accuracy of the output since the integrator",
"\\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y &",
"conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0] result[1,",
"time_step_output*time_index time_fine = time_coarse[time_index] # Initialise time evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :])",
"sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) - 1 result[2, 1] = sa*eq*ez*ep result[0, 2]",
"sample the field at, in units of s. * **simulation_index** (:obj:`int`) - a",
"for a given time series of a quantum state. Parameters ---------- state :",
"x /= r y /= r z /= r c = math.cos(r/2) s",
"inner product. Returns * **d** (:class:`numpy.complex128`) - The inner product of l and",
"= field_sample[3]/(6*precision) # eq = 1 + 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) -",
":math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1))",
"0]*right[0, 1] + left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator, result): result[0, 0] =",
"value : :obj:`str` A text label that can be used for archiving. index",
"conj(z): return (z.real - 1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if",
"_Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable` A python function that describes",
"for compilable numpy features. \"\"\" CUDA = (\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()`",
"for a specific GPU model. Defaults to 63 (optimal for GTX1070, the device",
"0] = 0.5*cisz*(cx + cy - 1j*sx*sy) # result[1, 0] = cisz*(-1j*sx +",
"jit_host def jit_device(func): return roc.jit(device = True)(func) self.jit_device = jit_device def jit_device_template(template): def",
"time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates the time dependent Schroedinger equation and",
"operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1,",
"\\\\\\\\ 1 & 0 & 1 \\\\\\\\ 0 & 1 & 0 \\\\end{pmatrix},&",
"1] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount =",
": :obj:`float` The time step used within the integration algorithm. In units of",
"right) : :obj:`callable` The inner (maths convention dot) product between two complex vectors.",
"the 3 level atom. Parameters ---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The state",
"(1, 3, \"one\") \"\"\" For three level systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options",
"don't compile the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()`",
"+ 1j*math.sin(ez) # eq = field_sample[3]/(6*precision) # eq = math.cos(eq) + 1j*math.sin(eq) result[0,",
"in the rotating frame, using the rating wave approximation: just define `get_field()` with",
"that contains definitions of all of the device functions (functions compiled for use",
"+ operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0])",
"0] = operator[1, 0] result[2, 0] = operator[2, 0] result[0, 1] = operator[0,",
"of time samples that the time evolution operator is found for. In units",
"\\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0 \\\\\\\\ i & 0",
"0] = 0 operator[0, 1] = 0 operator[1, 1] = 0 @jit_device def",
"y /= r z /= r c = math.cos(r/2) s = math.sin(r/2) result[0,",
"elif device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_fine",
"= np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output)",
":obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum state of the spin system over time,",
"a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device`",
"for different GPU models. \"\"\" jit_device = device.jit_device device_index = device.index @jit_device def",
"field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - rotating_wave transform_frame =",
"GPU must run fewer threads concurrently than it has Cuda cores, meaning some",
"- 1j*x)*s result[0, 1] = -(y + 1j*x)*s result[1, 1] = c +",
"time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index ==",
"note :: The use of a rotating frame is commonly associated with the",
"= np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype",
"vector) over time. \"\"\" if device.index == 0: spin = np.empty((state.shape[0], 3), np.float64)",
"2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine,",
"3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method",
"to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details. trotter_cutoff : :obj:`int` The number of",
"projection (Bloch vector) over time. This is calculated just in time using the",
"in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index,",
"to :math:`1`. set_to_zero(operator) : :obj:`callable` Make a matrix the additive identity, ie, :math:`0`.",
"transform_frame_lab get_field_jit = jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:,",
"conj(operator[2, 0]) result[1, 2] = conj(operator[2, 1]) result[2, 2] = conj(operator[2, 2]) @jit_device",
"found for. In units of s. This is an output, so use an",
"1 result[2, 1] = sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep",
"time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :],",
"could not jit get_field function into a device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field,",
"operators in succession to find the quantum state timeseries of the 3 level",
"for large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y",
"time_step_output, time_evolution_coarse): \"\"\" Find the stepwise time evolution opperator. Parameters ---------- sweep_parameter :",
"2] result[1, 2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1,",
"to :obj:`True`. If set to :obj:`True`, the integrator moves into a frame rotating",
"the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\",",
"vector, with the first three entries being x, y, z spatial directions (to",
"* **time_step_output** (:obj:`float`) - The sample resolution of the output timeseries for the",
"one :obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device` The option to select which device",
"error: numba could not jit get_field function into a device function.\\033[0m\\n\") raise def",
"J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0 \\\\\\\\ 1 & 0 &",
"math:: \\\\begin{align*} (a + ib)^* &= a - ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R}",
"compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None,",
"---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of the",
"return nb.njit()(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return jit_device_template",
"0 operator[2, 1] = 0 operator[0, 2] = 0 operator[1, 2] = 0",
"matrix to set to :math:`1`. set_to_zero(operator) : :obj:`callable` Make a matrix the additive",
"= IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers = 63): \"\"\" ..",
"jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return roc.jit(device =",
"-1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 &",
"times, each time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The",
"J_x - iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y)",
"right, result): result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] result[1,",
"s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X - c_X",
"**result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with",
"A &= -i(x J_x + y J_y + z J_z), \\\\end{align*} with ..",
"+ sqrt3)/6 w1 = (1.5 - sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] +",
"time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:],",
"power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) self.conj =",
"+ operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1] result[0, 2] =",
"+ ib)^* &= a - ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: *",
"level systems. \"\"\" ONE = (1, 3, \"one\") \"\"\" For three level systems.",
"`spin` property is needed. Compiled for chosen device on object constrution. Parameters: *",
"0 operator[1, 1] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1,",
"jit_device def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif",
"+ operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0] result[0, 1] =",
"lab frame, for each time sampled. Units of :math:`\\\\hbar`. This is an output,",
"operator[0, 2]*operator[2, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1,",
"\"\"\" def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The",
"to copy from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix",
"import roc import math sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\"",
"return cuda.jit(device = True, inline = True)(func) self.jit_device = jit_device def jit_device_template(template): def",
".. math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\",
"float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample,",
"the output since the integrator will on average take smaller steps. .. note",
"evolution * previous state for x_index in nb.prange(state.shape[1]): state[time_index, x_index] = 0 if",
"parallel. .. warning :: Work in progress, not currently functional! \"\"\" class Results:",
"warning: time_step_output not an integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration",
"+ \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X",
"if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index",
"GPU, in parallel. .. warning :: Work in progress, not currently functional! \"\"\"",
".. math:: (A)_{i, j} = (B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"being the amplitude of the quadratic shift (only appearing, and required, in spin",
"= jit_device def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template",
"0] = conj(operator[0, 1]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1,",
"+ left[0, 2]*right[2, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1,",
"1]*right[1, 2] + left[1, 2]*right[2, 2] result[2, 2] = left[2, 0]*right[0, 2] +",
"a = a/precision Ca = math.cos(a/2) Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez",
"(time_index, state_index) The state (wavefunction) of the spin system in the lab frame,",
"in progress, not currently functional! \"\"\" class Results: \"\"\" The results of a",
"(L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"operator[0, 0] = 0 operator[1, 0] = 0 operator[0, 1] = 0 operator[1,",
"1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\",
"return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0]",
"time varying `sweep_parameter`. time_start : :obj:`float` The time offset that the experiment is",
"\\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of",
"of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution operator (matrix) between the current and",
"the exponentiation is to be written to. * **trotter_cutoff** (:obj:`int`) - The number",
"time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time (1)) The",
"0: hyper_cube_amount = 0 precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if",
"- The absolute value of z. norm2(z) : :obj:`callable` The 2 norm of",
"y**2 + z**2) if r > 0: x /= r y /= r",
"device, and can be modified to increase the execution speed for a specific",
"3), dtype = np.complex128) # elif device_index == 1: # temporary = cuda.local.array((3,",
"J_y - iz J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y",
"dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] # for power_index in",
"state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum state of the",
"iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau}",
"return jit_device_template self.jit_device_template = jit_device_template elif value == \"roc\": def jit_host(template, max_registers): def",
"for different GPU models. max_registers : :obj:`int` The maximum number of registers allocated",
"= time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64) field_sample",
"above). \"\"\" def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber`",
"roc.get_global_id(1) if time_index < spin.shape[0]: if dimension == 2: spin[time_index, 0] = (state[time_index,",
"frame is commonly associated with the use of a rotating wave approximation, a",
"large) z component of the field, which increases the accuracy of the output",
"for a given time series of a quantum state. Used to calculate `spin`",
"the simulations that `spinsim` was designed for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index))",
"return cuda.jit(template, debug = False, max_registers = max_registers)(func) return jit_host self.jit_host = jit_host",
"of s. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`,",
"projection operator in the z direction. Returns ------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64`",
"0 \\\\\\\\ 0 & 0 & 0 \\\\\\\\ 0 & 0 & -1",
"(Bloch vector) over time. \"\"\" self.time = time self.time_evolution = time_evolution self.state =",
"spin projection (Bloch vector) over time. \"\"\" def __init__(self, time, time_evolution, state, spin_calculator):",
":obj:`bool` Whether or not to use the rotating frame optimisation. Defaults to :obj:`True`.",
"compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it",
"complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1,",
"super().__init__() self._value_ = value self.dimension = dimension self.label = label HALF = (1/2,",
"Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on a",
"the hilbert space the states with this spin belong to. label : :obj:`str`",
"z_index in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1,",
"into a cuda device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif",
"time series of a quantum state. Used to calculate `spin` the first time",
"1] = operator[1, 1] result[2, 1] = operator[2, 1] result[0, 2] = operator[0,",
"cuda.local.array((dimension, dimension), dtype = np.complex128) elif device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension,",
"def jit_device_template(template): def jit_device_template(func): return func return jit_device_template self.jit_device_template = jit_device_template elif value",
"the state. Must be a whole number multiple of `time_step_integration`. Measured in s.",
"math.cos(x) # sx = math.sin(x) # cy = math.cos(y) # sy = math.sin(y)",
"The duration of the experiment is `time_end - time_start`. time_step_integration : :obj:`float` The",
"0 & i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 & 0 &",
"CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run",
"details. threads_per_block : :obj:`int` The size of each thread block (workgroup), in terms",
":obj:`callable` Copy the contents of one matrix into another. .. math:: (A)_{i, j}",
"supplied by the user. Modifies the field function so the integrator can be",
"need for slow recompilation. For example, if the `sweep_parameter` is used to define",
"is said to have less occupancy. Lowering the value increases GPU occupancy, meaning",
"\\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as, for large :math:`\\\\tau`, ..",
"max_registers = max_registers)(func) return jit_host self.jit_host = jit_host def jit_device(func): return cuda.jit(device =",
"a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(time_coarse.size):",
"r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i (l_i)^* r_i",
"if device_index == 0: time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128) field_sample =",
":obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state of the spin system over time, written",
"the approximate matrix (:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters",
"x_index)) - The evaluated time evolution operator between each time step. See :ref:`architecture`",
"0 result[1, 1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2)",
"device_index = device.index dimension = spin_quantum_number.dimension lie_dimension = dimension + 1 # utility_set",
"The absolute value of a complex number. .. math:: \\\\begin{align*} |a + ib|",
"2] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0] =",
"two complex vectors. .. note:: The mathematics definition is used here rather than",
"state in given out of the rotating frame. One can, of course, use",
"float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample",
"- state[time_index, 2].imag**2 elif device_index > 0: if device_index == 1: time_index =",
"each time step. See :ref:`architecture` for some information. \"\"\" for time_index in range(state.shape[0]):",
"J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y -",
"thread block (workgroup), in terms of the number of threads (workitems) they each",
"0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] result[1, 0] = left[1,",
"= 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/(2*precision) # y =",
"\\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2",
"evaluated at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index) The evaluated time",
"math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if dimension > 2: field_sample[0, 3]",
"The matrix to set to :math:`0`. matrix_multiply(left, right, result) : :obj:`callable` Multiply matrices",
"+ z J_z + q J_q), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &=",
"always added to the number specified for control, so really this number is",
"exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method ==",
"Defaults to 64. Modifying might be able to increase execution time for different",
"form. .. warning:: Only available for use with spin half systems. Will not",
"field_sample[2]/precision # q = field_sample[3]/precision # cx = math.cos(x) # sx = math.sin(x)",
"eq = math.cos(eq) + 1j*math.sin(eq) # Ca = 1 # Sa = a/2",
":obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time. \"\"\"",
"float64[:], complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter):",
"= conj(operator[0, 2]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1])",
"= 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/precision # y =",
"= -(y + 1j*x)*s result[1, 1] = c + 1j*z*s else: result[0, 0]",
"spin projection (Bloch vector) over time. \"\"\" def __init__(self, get_field, spin_quantum_number, device =",
"2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\",
"= conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] +",
"function into a cuda device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host()",
"time sampled. Units of :math:`\\\\hbar`. This is an output, so use an empty",
"(:class:`numpy.float64`) - The absolute value of z. norm2(z) : :obj:`callable` The 2 norm",
"and z (and q for spin one) respectively, as described above. * **result**",
"field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame: # Take out of",
"x_index) The evaluated time evolution operator between each time step. See :ref:`architecture` for",
"*= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else:",
"elif device_index == 1: temporary = cuda.local.array((2, 2), dtype = np.complex128) elif device_index",
"- The times that `state` was evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64`",
"\\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to",
"raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init,",
"the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)",
"result) # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) result[0, 0] += 1",
"> 2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index,",
"elif device_index > 0: if device_index == 1: time_index = cuda.grid(1) elif device_index",
"order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration method. \"\"\"",
"operator[0, 1] result[1, 1] = operator[1, 1] result[2, 1] = operator[2, 1] result[0,",
"this method multiple times, each time varying `sweep_parameter`. time_start : :obj:`float` The time",
"= cuda.local.array((3, 3), dtype = np.complex128) # elif device_index == 2: # temporary_group",
":], rotating_wave, rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6 w1 = (1.5 - sqrt3)/6",
"2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :,",
"**z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to take the 2 norm",
"z[2].imag**2) @jit_device def cross(left, right, result): result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1] =",
"defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. exponentiation_method",
"elif device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_old",
"Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based on its analytic form. .. warning:: Only",
"spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 +",
"= Results(time, time_evolution_coarse, state, self.spin_calculator) return results @staticmethod @nb.njit def get_state(state_init, state, time_evolution):",
"of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum state of the spin system over",
"roc import math sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options",
"*= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0]",
"in given out of the rotating frame. One can, of course, use :mod:`spinsim`",
"&= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated, it is then recursively squared :math:`\\\\tau`",
"+ operator[0, 2]*operator[2, 2] result[1, 2] = operator[1, 0]*operator[0, 2] + (2 +",
"respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"1]*right[1, 0] + left[2, 2]*right[2, 0] result[0, 1] = left[0, 0]*right[0, 1] +",
"= matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic = matrix_exponential_analytic self.matrix_exponential_lie_trotter = matrix_exponential_lie_trotter self.matrix_square_residual =",
"time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size",
"vector to take the 2 norm of. Returns * **nz** (:class:`numpy.float64`) - The",
"# cy = math.cos(y) # sy = math.sin(y) # cisz = math.cos(z) +",
"time_fine = time_coarse[time_index] # Initialise time evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0,",
"quantum number. dimension : :obj:`int` Dimension of the hilbert space the states with",
"1] result[0, 2] = (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2]",
"return roc.jit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return roc.jit(device = True)(func)",
"elif value == \"roc\": def jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func) return jit_host",
"label that can be used for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator",
"Then the exponential can be approximated as, for large :math:`\\\\tau`, .. math:: \\\\begin{align*}",
"0] += 1 result[1, 1] += 1 result[2, 2] += 1 # @jit_device",
"+ 1j*sx*sy)*cisz # if device_index == 0: # temporary = np.empty((2, 2), dtype",
"import Enum import numpy as np import numba as nb from numba import",
"1 # Sa = a/2 # ca = 1 # sa = -1j*a/sqrt2",
"**operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator to take the adjoint",
"63): \"\"\" Compiles the integrator and spin calculation functions of the simulator. Parameters",
"1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = time_fine + time_step_integration",
"a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex number",
"0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if dimension",
"features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index =",
"math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device def inner(left, right): return conj(left[0])*right[0]",
"offset that the experiment is to start at, and the time that the",
"the eigenstates of the spin projection operator in the z direction. Returns: *",
"An object containing the results of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration))",
"(y_index, x_index)) - The values of x, y and z (and q for",
"ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max",
"- 1j*sx*sy)/cisz # result[1, 0] = (cx*sy -1j*sx*cy)/cisz # result[0, 1] = -(cx*sy",
"- 1 if device_index == 0: temporary = np.empty((3, 3), dtype = np.complex128)",
"(fast memory) to each thread, out of a maximum number for the whole",
"+ z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device def cross(left, right, result):",
"left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0,",
"of :class:`numpy.float64` (spatial_index)) the returned value of the field. This is a four",
"\\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2",
"(state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index,",
"- 1 result[2, 1] = sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] =",
"(\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to",
"= utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device =",
"self.device = device self.get_time_evolution_raw = None self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number, device,",
"self.device.index == 0: time = np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)",
"= 63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable` A",
"+ cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy) # cisz",
"the lab frame, for each time sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index,",
"set to :obj:`True` - no such approximations are made, and the output state",
"duration of the experiment is `time_end - time_start`. time_step_integration : :obj:`float` The integration",
"in s. time_step_output : :obj:`float` The sample resolution of the output timeseries for",
"c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}}",
"`Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable",
"z_index]*state[time_index - 1, z_index] else: state[time_index, x_index] += state_init[x_index] sqrt2 = math.sqrt(2) sqrt3",
"2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3])",
"when the experiment is to start and finishes. In units of s. time_step_integration",
"features`_ for compilable python features. \"\"\" ROC = (\"roc\", 2) \"\"\" Use the",
"def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 +",
"print(\"\\033[31mspinsim error: numba.roc could not jit get_field function into a roc device function.\\033[0m\\n\")",
"append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2,",
"float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample,",
"rotating frame. integration_method : :obj:`IntegrationMethod` Which integration method to use in the integration.",
"systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation using the Lie Trotter",
": :obj:`SpinQuantumNumber` The option to select whether the simulator will integrate a spin",
"get_time_evolution self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\"",
".. note :: To use this device option, the user defined field function",
"== 1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension),",
"**time_sample** (:obj:`float`) - the time to sample the field at, in units of",
"spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index,",
"0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 + operator[2,",
"left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] result[1, 0] = left[1, 0]*right[0, 0]",
": :obj:`callable` Conjugate of a complex number. .. math:: \\\\begin{align*} (a + ib)^*",
"field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine,",
"time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could not jit get_field function",
"== 2: temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1),",
"in the lab frame, for each time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin",
"system is being put under. It must have three arguments: * **time_sample** (:obj:`float`)",
"s. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or",
"Sa = a/2 # ca = 1 # sa = -1j*a/sqrt2 # ez",
"method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation to use for matrix exponentiation within",
"Must be a whole number multiple of `time_step_integration`. Measured in s. * **time_evolution_coarse**",
"print(\"\\033[31mspinsim error: numba.cuda could not jit get_field function into a cuda device function.\\033[0m\\n\")",
"spin system in the lab frame, for each time sampled. Units of :math:`\\\\hbar`.",
"hyperfine spin of the spin system in the lab frame, for each time",
"states in the rotating frame, using the rating wave approximation: just define `get_field()`",
":], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding):",
"2] result[2, 2] = operator[2, 2] @jit_device def set_to_one(operator): operator[0, 0] = 1",
"compile the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()` LLVM",
"be approximated as, for large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x",
":obj:`callable` Make a matrix the multiplicative identity, ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i,",
"def jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func)",
"reference number, used when compiling the integrator, where higher level objects like enums",
"`Supported Numpy features`_ for compilable numpy features. \"\"\" CPU = (\"cpu\", 0) \"\"\"",
"to be written to. * **trotter_cutoff** (:obj:`int`) - The number of squares to",
"+ z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device def inner(left, right): return conj(left[0])*right[0] +",
"q/3) # result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy) # result[1, 2]",
"higher level objects like enums cannot be interpreted. \"\"\" def __init__(self, value, index):",
"< time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:,",
"= roc.get_global_id(1) if time_index < spin.shape[0]: if dimension == 2: spin[time_index, 0] =",
"conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[0, 1] = conj(operator[1, 0]) result[1,",
"if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)",
"of spin half. Switching to a Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index",
"self.spin_quantum_number = spin_quantum_number self.device = device self.get_time_evolution_raw = None self.get_spin_raw = None try:",
"cuda from numba import roc import math sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3)",
"0] + (2 + operator[2, 2])*operator[2, 0] result[0, 1] = (2 + operator[0,",
"-i(x J_x + y J_y + z J_z), \\\\end{align*} with .. math:: \\\\begin{align*}",
"that the spin system is being put under. It must have three arguments:",
"time_coarse[time_index] # Initialise time evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] =",
"= max_registers)(func) return jit_host self.jit_host = jit_host def jit_device(func): return cuda.jit(device = True,",
": :obj:`float` The integration time step. Measured in s. time_step_output : :obj:`float` The",
"than the physics definition, so the left vector is conjugated. Thus the inner",
"# ca = 1 # sa = -1j*a/sqrt2 # ez = field_sample[2]/(2*precision) #",
"evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0 if use_rotating_frame: time_sample",
"@jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample,",
"for archiving. index : :obj:`int` A reference number, used when compiling the integrator,",
"of the output since the integrator will on average take smaller steps. ..",
"int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index == 0: time = np.empty(time_index_max, np.float64) time_evolution_coarse =",
"can be used for many experiments, without the need for slow recompilation. For",
"0] = conj(operator[0, 2]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1,",
"operator[0, 2]*operator[2, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1,",
"time_evolution_coarse) elif self.device == Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension,",
"average take smaller steps. .. note :: The use of a rotating frame",
"print(\"\\033[31mspinsim warning!!!\\n_attempting to use an analytic exponentiation method outside of spin half. Switching",
"to start at, and the time that the experiment is to finish at.",
"= (cx*cy - 1j*sx*sy)/cisz # result[1, 0] = (cx*sy -1j*sx*cy)/cisz # result[0, 1]",
"math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for the spin quantum number of a system.",
"fewer threads concurrently than it has Cuda cores, meaning some cores are inactive,",
"\\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0 \\\\\\\\ 1 & 0",
"the z direction. Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The",
"= left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0] result[0,",
"dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index == 2:",
"1] + operator[0, 2]*operator[2, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2",
"spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int` The",
"compiler to compile the integrator to run on an AMD ROCm compatible GPU,",
"to each thread, meaning slower memory must be used. Thus, there will be",
"0: x /= r y /= r z /= r c = math.cos(r/2)",
"registers (fast memory) to each thread, out of a maximum number for the",
"float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine",
"X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta}",
"= True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers =",
"time_start`. time_step_integration : :obj:`float` The integration time step. Measured in s. time_step_output :",
"0] = 1 operator[1, 0] = 0 operator[2, 0] = 0 operator[0, 1]",
"0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to",
"Note that one extra register per thread is always added to the number",
"&= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x -",
"being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See",
"1] = conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result): x = field_sample[0] y =",
"\\\\begin{align*} (a + ib)^* &= a - ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*}",
"= time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0, 2] if",
"time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1]",
"- 1j*sx*sy) # cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy",
"HALF_STEP = \"half_step\" \"\"\" Integration method from AtomicPy. Makes two Euler integration steps,",
":obj:`callable` Calculates the expected spin projection (Bloch vector) over time for a given",
"- time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0,",
".. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0 \\\\\\\\ 1",
"each expected spin value in parallel. For spin half: .. math:: \\\\begin{align*} \\\\langle",
"2] = 0 operator[1, 2] = 0 operator[2, 2] = 0 @jit_device def",
"= (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] =",
"property is needed. Compiled for chosen device on object constrution. Parameters: * **state**",
".. math:: \\\\begin{align*} (a + ib)^* &= a - ib\\\\\\\\ a, b &\\\\in",
"of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch vector) over time. This",
"function so the integrator can be used for many experiments, without the need",
"result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator,",
"result[0, 0] = Ca/ez - 1 result[1, 0] = Sa*ep result[0, 1] =",
"s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin one systems** Assumes the exponent is an",
"integration steps, one sampling the field from the start of the time step,",
"(:class:`numpy.complex128`) - The inner product of l and r. set_to(operator, result) : :obj:`callable`",
"Results(time, time_evolution_coarse, state, self.spin_calculator) return results @staticmethod @nb.njit def get_state(state_init, state, time_evolution): \"\"\"",
"== 1: # Run calculation for each coarse timestep in parallel time_index =",
"to take the adjoint of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"0] = 1 result[1, 0] = 0 result[0, 1] = 0 result[1, 1]",
"In units of s. This is an output, so use an empty :class:`numpy.ndarray`",
"math:: (A)_{i, j} = (B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"number of squares to make to the approximate matrix (:math:`\\\\tau` above). \"\"\" def",
"time step, one sampling the field from the end of the time step.",
"to :math:`0`. matrix_multiply(left, right, result) : :obj:`callable` Multiply matrices left and right together,",
"time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 =",
"# utility_set = spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method =",
"to compile the integrator to run on an Nvidia cuda compatible GPU, in",
"- The vector to right multiply in the inner product. Returns * **d**",
"# Sa = a/2 # ca = 1 # sa = -1j*a/sqrt2 #",
"math:: \\\\begin{align*} |a + ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R}",
"1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0:",
"- The vector to take the 2 norm of. Returns * **nz** (:class:`numpy.float64`)",
"time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = ((time_fine + 0.5*time_step_integration*(1 +",
": :obj:`callable` Calculates the expected spin projection (Bloch vector) over time for a",
"quantum state. Parameters ---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum",
"For three level systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options for describing which method",
"time dependent Schroedinger equation and returns the quantum state of the spin system",
"2]*right[2, 1] result[2, 1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] +",
"z_index] else: state[time_index, x_index] += state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon",
"field_sample[1] = X.imag field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:],",
"A &= -i(x J_x + y J_y + z J_z + q J_q),",
"Python features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy",
"1] = operator[2, 1] result[0, 2] = operator[0, 2] result[1, 2] = operator[1,",
"spin_calculator): \"\"\" Parameters ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that",
"\"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index = index ANALYTIC",
"+ (2 + operator[2, 2])*operator[2, 1] result[0, 2] = (2 + operator[0, 0])*operator[0,",
"math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :],",
"parallel. Compiled for chosen device on object constrution. Parameters: * **sweep_parameter** (:obj:`float`) -",
"rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0]",
"0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif device_index >",
"J_x + y J_y + z J_z), \\\\end{align*} with .. math:: \\\\begin{align*} J_x",
"= operator[0, 2] result[1, 2] = operator[1, 2] result[2, 2] = operator[2, 2]",
"three arguments: * **time_sample** (:obj:`float`) - the time to sample the field at,",
"spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4,",
":], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1]",
"index : :obj:`int` A reference number, used when compiling the integrator, where higher",
"direction. Returns ------- results : :obj:`Results` An object containing the results of the",
":, :] # for power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary,",
"0] + operator[0, 1]*operator[1, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2",
"1] result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2",
"result[1, 0] = conj(operator[0, 1]) result[2, 0] = conj(operator[0, 2]) result[0, 1] =",
"2]*right[2, 2] result[1, 2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] +",
"and changing this value could increase performance for your GPU. See `Achieved Occupancy`_",
":func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(spin.shape[0]): if dimension ==",
"0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] +",
":, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the",
"See :ref:`architecture` for some information. spin_calculator : :obj:`callable` Calculates the expected spin projection",
"2] = 0 operator[1, 2] = 0 operator[2, 2] = 1 @jit_device def",
"= math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] =",
"field_sample[1]/precision # z = field_sample[2]/precision # q = field_sample[3]/precision # cx = math.cos(x)",
"__init__(self, time, time_evolution, state, spin_calculator): \"\"\" Parameters ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64`",
"quantum number of a system. Parameters ---------- value : :obj:`float` The numerical value",
"LLVM compiler to compile the integrator to run on a single CPU core.",
"function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4,",
"= temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) #",
"float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0,",
"to. * **trotter_cutoff** (:obj:`int`) - The number of squares to make to the",
"in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details. trotter_cutoff :",
"if self.device.index == 0: time = np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension),",
"rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index, 0, 2]",
"0 & -i & 0 \\\\\\\\ i & 0 & -i \\\\\\\\ 0",
"of s. Determines the sample rate of the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse",
"1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] + operator[1,",
"between the current and next timesteps, for each time sampled. See :math:`U(t)` in",
"&= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i & 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix}",
"result[1, 0] = sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1,",
"import utilities from enum import Enum import numpy as np import numba as",
"(-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i",
"= 0 operator[2, 1] = 0 operator[0, 2] = 0 operator[1, 2] =",
"use on the target device) used in the integrator. These device functions are",
"time_evolution_coarse) if use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave,",
"# elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype =",
"field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1,",
"trotter_cutoff) # Premultiply to the exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old,",
"\\\\exp(-ix J_x - iy J_y - iz J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix",
"a matrix the multiplicative identity, ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i, j} &=",
"run on an Nvidia cuda compatible GPU, in parallel. .. note :: To",
"0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension",
"jit_device_template elif value == \"roc\": def jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func) return",
"based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration method. \"\"\" HALF_STEP =",
"to the number specified for control, so really this number is 64). Raising",
"spin one systems). .. note:: This function must be compilable for the device",
"of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`1`. set_to_zero(operator) :",
"+ state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2) else: spin[time_index, 0] =",
"device.index @jit_device def conj(z): return (z.real - 1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2",
"device = True)(func) return jit_device_template self.jit_device_template = jit_device_template PYTHON = (\"python\", 0) \"\"\"",
"T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z",
"set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0] = operator[1, 0] result[0,",
"in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index]",
"except: print(\"\\033[31mspinsim error: numba could not jit get_field function into a device function.\\033[0m\\n\")",
"trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount <",
"dimension, label): super().__init__() self._value_ = value self.dimension = dimension self.label = label HALF",
"(y_index, x_index)) - A matrix to be filled with the result of the",
"import numba as nb from numba import cuda from numba import roc import",
"\\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i",
"analytic solutions of spin system dynamics. This is not done when this option",
"F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix}",
"jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device = jit_device def jit_device_template(template):",
"that can be used for archiving. \"\"\" def __init__(self, value, dimension, label): super().__init__()",
":class:`Device` for more information and links. spin_quantum_number : :obj:`SpinQuantumNumber` The option to select",
"field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding",
"set to :math:`0`. matrix_multiply(left, right, result) : :obj:`callable` Multiply matrices left and right",
"math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2])",
"complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave,",
"meaning some cores are inactive, and the GPU is said to have less",
"x_index)) - The matrix to set to :math:`0`. matrix_multiply(left, right, result) : :obj:`callable`",
"sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1",
"math.sin(x) # cy = math.cos(y) # sy = math.sin(y) # cisz = math.cos(z)",
"number. .. math:: \\\\begin{align*} |a + ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b",
"from AtomicPy. Makes two Euler integration steps, one sampling the field from the",
"result[2, 2] = operator[2, 2] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1,",
"\"\"\" if not device: if cuda.is_available(): device = Device.CUDA else: device = Device.CPU",
"device_index == 0: temporary = np.empty((2, 2), dtype = np.complex128) elif device_index ==",
"def matrix_multiply(left, right, result): result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1,",
"0] += 1 result[1, 1] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result,",
"jit_host(func): return cuda.jit(template, debug = False, max_registers = max_registers)(func) return jit_host self.jit_host =",
"self.jit_device_template = jit_device_template elif value == \"cpu\": def jit_host(template, max_registers): def jit_host(func): return",
"many simulations, sweeping through bias values, by calling this method multiple times, each",
"* **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that the experiment",
"of a quantum state. Parameters ---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)",
"field_sample[1] z = field_sample[2] r = math.sqrt(x**2 + y**2 + z**2) if r",
"the inner product of two orthogonal vectors is 0. .. math:: \\\\begin{align*} l",
"are allocated than are available for the GPU model, the GPU must run",
"state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of the spin",
"object constrution. Parameters: * **sweep_parameter** (:obj:`float`) - The input to the `get_field` function",
"of a matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*}",
"rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index,",
"Returns ------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection",
"left[0, 1]*right[1, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0]",
"math sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for the",
"Parameters ---------- value : :obj:`str` A text label that can be used for",
"1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2: field_sample[2, 3] =",
"math.cos(a/2) Sa = math.sin(a/2) ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision)",
"field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if dimension >",
"result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy # result[2,",
"- The evaluated time evolution operator between each time step. See :ref:`architecture` for",
"of the eigenstates of the spin projection operator in the z direction. Returns",
"\\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X -",
"self.index = index if value == \"python\": def jit_host(template, max_registers): def jit_host(func): return",
"l \\\\cdot r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i",
"hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/(2*precision) # y",
"state[time_index, x_index] += state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000",
"sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0,",
"spin projection operator in the z direction. Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64`",
"if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method =",
"operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0]",
"timestep in parallel time_index = roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output,",
"time using the JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates the expected spin",
"run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device`",
"exponential based on the Lie Product Formula, .. math:: \\\\exp(A + B) =",
"= a/precision Ca = math.cos(a/2) Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez =",
"set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0 if use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to take the 2 norm of.",
":] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary)",
"+ time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample,",
"step. Measured in s. time_step_output : :obj:`float` The sample resolution of the output",
":], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :],",
"find the quantum state timeseries of the 3 level atom. Parameters ---------- state_init",
"complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find",
"+ y**2 + z**2) if r > 0: x /= r y /=",
"The absolute value of z. norm2(z) : :obj:`callable` The 2 norm of a",
"of the spin system over time, written in terms of the eigenstates of",
"jit_device_template(template): def jit_device_template(func): return roc.jit(template, device = True)(func) return jit_device_template self.jit_device_template = jit_device_template",
":obj:`numpy.float64` (start/end)) - The time offset that the experiment is to start at,",
"> 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine,",
"just define `get_field()` with field functions that use the rotating wave approximation in",
"float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables if",
"simulations that `spinsim` was designed for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the",
"result[2, 1] = conj(operator[1, 2]) result[0, 2] = conj(operator[2, 0]) result[1, 2] =",
"to left multiply by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right",
"spin of the spin system in the lab frame, for each time sampled.",
"time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index) The evaluated time evolution operator",
"2])*operator[2, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1,",
"= cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size +",
"The state (wavefunction) of the spin system in the lab frame, for each",
"0 & -i \\\\\\\\ 0 & i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix}",
"elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def",
"jit_host def jit_device(func): return func self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return",
"spatial_direction) The expected spin projection (Bloch vector) over time. This is calculated just",
"integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block :",
"# z = field_sample[2]/(2*precision) # cx = math.cos(x) # sx = math.sin(x) #",
"+ left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0] result[0, 1] = left[0, 0]*right[0,",
"&= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y",
"- state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\" Calculates the expected spin projection (Bloch",
"`time_step_integration`. Measured in s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum",
":], float64[:, :])\", max_registers = max_registers) def get_spin(state, spin): \"\"\" Calculate each expected",
"1] = c + 1j*z*s else: result[0, 0] = 1 result[1, 0] =",
"the z axis by an amount defined by the field in the z",
"device_index == 1: temporary = cuda.local.array((3, 3), dtype = np.complex128) elif device_index ==",
"terms of the eigenstates of the spin projection operator in the z direction.",
"= time_evolution_coarse.copy_to_host() time = time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse)",
"dimension), dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the exponential",
"rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1]",
"= 0 operator[1, 2] = 0 operator[2, 2] = 1 @jit_device def set_to_zero(operator):",
"in s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state of",
"z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device def cross(left, right, result): result[0]",
"get_field, spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame = True, integration_method =",
"+ c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X",
"range(state.shape[0]): # State = time evolution * previous state for x_index in nb.prange(state.shape[1]):",
"& 0 \\\\\\\\ 0 & 0 & 1 \\\\end{pmatrix} \\\\end{align*} Then the exponential",
"= get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:, :, :],",
"spin) elif device == Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0]",
"result. .. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left**",
"self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates",
"take smaller steps. .. note :: The use of a rotating frame is",
"+ operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2] result[1, 2] = operator[1, 0]*operator[0,",
"(time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time. \"\"\" def",
"in the z direction. Returns ------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)",
"left[0, 1]*right[1, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1]",
"declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in",
"the spin system, written in terms of the eigenstates of the spin projection",
":class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The expected value for hyperfine spin of the",
"cx = math.cos(x) # sx = math.sin(x) # cy = math.cos(y) # sy",
":class:`numpy.float64` (time_index, spatial_index) The expected value for hyperfine spin of the spin system",
"time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :]",
"IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse,",
"the bias field strength in `get_field`, then one can run many simulations, sweeping",
"was designed for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of",
"# eq = math.cos(eq) + 1j*math.sin(eq) result[0, 0] = Ca/ez - 1 result[1,",
"= append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64,",
"use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0,",
"SpinQuantumNumber(Enum): \"\"\" Options for the spin quantum number of a system. Parameters ----------",
"= True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return roc.jit(template, device =",
"example, if the `sweep_parameter` is used to define the bias field strength in",
"iy J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz",
"\\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z - iq",
"z = field_sample[2] r = math.sqrt(x**2 + y**2 + z**2) if r >",
"# eq = field_sample[3]/(6*precision) # eq = math.cos(eq) + 1j*math.sin(eq) result[0, 0] =",
"to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a matrix exponential based on the Lie",
"1] = 0 operator[1, 1] = 1 @jit_device def set_to_zero(operator): operator[0, 0] =",
"time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter,",
"that the experiment is to finish at. Measured in s. * **time_step_integration** (:obj:`float`)",
"value, index): super().__init__() self._value_ = value self.index = index if value == \"python\":",
"(workitems) they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`).",
"0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0]",
"= None, exponentiation_method = None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff =",
"\\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray`",
"Ca = math.cos(a/2) Sa = math.sin(a/2) ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez",
"np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype =",
"True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block = 64,",
"0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index,",
"< time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 2:",
"self.set_to_one = set_to_one self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic",
"return jit_host self.jit_host = jit_host def jit_device(func): return roc.jit(device = True)(func) self.jit_device =",
"to make to the approximate matrix (:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number, device,",
"imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A",
":class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution operator (matrix) between the current",
":obj:`callable` A python function that describes the field that the spin system is",
"complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index == 0: time_evolution_old",
"= (\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator",
"+ left[0, 1]*right[1, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1,",
"or spin one :obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device` The option to select",
"2]*right[2, 2] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0, 0])*operator[0,",
"\"\"\" def __init__(self, time, time_evolution, state, spin_calculator): \"\"\" Parameters ---------- time : :obj:`numpy.ndarray`",
"is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\",
"np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype",
"to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin",
"HALF = (1/2, 2, \"half\") \"\"\" For two level systems. \"\"\" ONE =",
"different GPU models. \"\"\" jit_device = device.jit_device device_index = device.index @jit_device def conj(z):",
"(:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over",
"return func return jit_host self.jit_host = jit_host def jit_device(func): return func self.jit_device =",
"integrator is being compiled for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy",
"(wavefunction) of the spin system in the lab frame, for each time sampled.",
"1] + (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1] result[2, 1]",
"1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run",
":, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result,",
":math:`1`. .. math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i =",
"0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] result[1, 0]",
"for time_index in range(state.shape[0]): # State = time evolution * previous state for",
"Modifies the field function so the integrator can be used for many experiments,",
": :obj:`callable` Multiply matrices left and right together, to be returned in result.",
"to set to :math:`1`. set_to_zero(operator) : :obj:`callable` Make a matrix the additive identity,",
"c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z +",
"swept over when multiple simulations need to be run. For example, it is",
"error: numba.cuda could not jit get_field function into a cuda device function.\\033[0m\\n\") raise",
"= np.empty((dimension, dimension), dtype = np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64)",
"class SpinQuantumNumber(Enum): \"\"\" Options for the spin quantum number of a system. Parameters",
"= math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2",
"in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 1:",
"integrator moves into a frame rotating in the z axis by an amount",
"cy - 1j*sx*sy) # result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0]",
"# result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy) # if device_index ==",
"code for the integrator, ie, don't compile the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\",",
"`get_field` function supplied by the user. Modifies the field function so the integrator",
"GPU model. This means that if more registers are allocated than are available",
"0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[0, 1] = conj(operator[1,",
"can be used for archiving. index : :obj:`int` A reference number, used when",
"+ 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample,",
"0] result[2, 0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2,",
"1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter,",
"cuda compatible GPU, in parallel. .. note :: To use this device option,",
"attr_name, spin) return self.spin raise AttributeError(\"{} has no attribute called {}.\".format(self, attr_name)) class",
"= -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) # eq =",
"function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128)",
".. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray`",
"= jit_host def jit_device(func): return nb.njit()(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func):",
":obj:`numpy.float64` (time_index) The times that `state` was evaluated at. time_evolution : :obj:`numpy.ndarray` of",
"0]) result[1, 2] = conj(operator[2, 1]) result[2, 2] = conj(operator[2, 2]) @jit_device def",
"* **az** (:class:`numpy.float64`) - The absolute value of z. norm2(z) : :obj:`callable` The",
"z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device def cross(left, right,",
"the spin projection operator in the z direction. Returns ------- spin : :obj:`numpy.ndarray`",
": :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based on its analytic form. ..",
"analytic exponentiation method outside of spin half. Switching to a Lie Trotter method.\\033[0m\")",
":class:`numpy.complex128`, (index)) - The vector to take the 2 norm of. Returns *",
"strength in `get_field`, then one can run many simulations, sweeping through bias values,",
"integration. Parameters ---------- value : :obj:`str` A text label that can be used",
"of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of the spin system in the",
"= left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1] result[2,",
"= \"magnus_cf4\" \"\"\" Commutator free, fourth order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE =",
"be able to increase execution time for different GPU models. \"\"\" jit_device =",
"1 & 0 & 1 \\\\\\\\ 0 & 1 & 0 \\\\end{pmatrix},& J_y",
"value in parallel. For spin half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix}",
"dtype = np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end,",
"z spatial directions (to model a magnetic field, for example), and the fourth",
"operator[1, 1] result[2, 1] = operator[2, 1] result[0, 2] = operator[0, 2] result[1,",
"0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2] result[2,",
":math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of",
"device_index == 0: time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128) field_sample = np.empty((sample_index_max,",
"of s. time_step_output : :obj:`float` The time difference between each element of `time_coarse`.",
"2]*operator[2, 1] result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] +",
"of threads (workitems) they each contain, when running on the GPU target devices",
"entry being the amplitude of the quadratic shift (only appearing, and required, in",
"each thread, out of a maximum number for the whole GPU, for each",
"= (\"analytic\", 0) \"\"\" Analytic expression of the matrix exponential. For spin half",
"1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] +",
"math.sin(y) # cisz = math.cos(z) + 1j*math.sin(z) # result[0, 0] = (cx*cy -",
"return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers = max_registers) def get_spin(state, spin): \"\"\" Calculate",
"in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) else: @jit_device def",
"to. label : :obj:`str` A text label that can be used for archiving.",
"integrator, where higher level objects like enums cannot be interpreted. \"\"\" def __init__(self,",
"(threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return spin",
"containing the results of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6:",
"\\\\end{align*} Then the exponential can be approximated as, for large :math:`\\\\tau`, .. math::",
":], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample =",
"-1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as, for large :math:`\\\\tau`,",
"r > 0: x /= r y /= r z /= r c",
"expected spin projection (Bloch vector) over time. \"\"\" def __init__(self, get_field, spin_quantum_number, device",
"the time evolution operator is found for. In units of s. This is",
"evolution operator is found for. In units of s. This is an output,",
"results of a an evaluation of the integrator. Attributes ---------- time : :obj:`numpy.ndarray`",
"= conj(operator[0, 1]) result[2, 0] = conj(operator[0, 2]) result[0, 1] = conj(operator[1, 0])",
"\"\"\" HALF_STEP = \"half_step\" \"\"\" Integration method from AtomicPy. Makes two Euler integration",
"J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) &",
"device.jit_device jit_device_template = device.jit_device_template device_index = device.index dimension = spin_quantum_number.dimension lie_dimension = dimension",
"Sa/ep result[1, 1] = Ca*ez - 1 if device_index == 0: temporary =",
"0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index,",
"Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) # eq",
"time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 2: # Run calculation for each",
"being compiled for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html",
"evaluating the time evolution operator in parallel. Compiled for chosen device on object",
"course, use :mod:`spinsim` to integrate states in the rotating frame, using the rating",
"z J_z), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1",
"value self.dimension = dimension self.label = label HALF = (1/2, 2, \"half\") \"\"\"",
"it has Cuda cores, meaning some cores are inactive, and the GPU is",
"\\\\begin{align*} A &= -i(x J_x + y J_y + z J_z + q",
"registers are allocated than are available for the GPU model, the GPU must",
"cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid,",
"# result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy) # result[1, 0] =",
"temporary) # matrix_multiply(temporary, temporary, result) result[0, 0] += 1 result[1, 1] += 1",
"whole number multiple of `time_step_integration`. Measured in s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128`",
"conj(z) : :obj:`callable` Conjugate of a complex number. .. math:: \\\\begin{align*} (a +",
"cy = math.cos(y) # sy = math.sin(y) # cisz = math.cos(z + q/3)",
"def jit_device(func): return cuda.jit(device = True, inline = True)(func) self.jit_device = jit_device def",
"def jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host = jit_host def",
"in the z axis by an amount defined by the field in the",
"is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math::",
"value could increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official",
"0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0,",
"y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z +",
"1]*right[1, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] result[0,",
"state[time_index, 2].imag**2 elif device_index > 0: if device_index == 1: time_index = cuda.grid(1)",
"- The matrix to left multiply by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"2] + operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2] @jit_device def",
"- 1j*sx*sy) # result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0] =",
"- c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X",
"inner (maths convention dot) product between two complex vectors. .. note:: The mathematics",
"rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame:",
"multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start,",
"make to the approximate matrix (:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number, device, threads_per_block):",
"\"\"\" def __init__(self, value, dimension, label): super().__init__() self._value_ = value self.dimension = dimension",
"operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1] result[1, 1]",
"result, temporary) # matrix_multiply(temporary, temporary, result) else: @jit_device def norm2(z): return math.sqrt(z[0].real**2 +",
"all of the device functions (functions compiled for use on the target device)",
"\\\\end{align*} **For spin one systems** Assumes the exponent is an imaginary linear combination",
"z respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"samples that the time evolution operator is found for. In units of s.",
"if device_index == 0: # temporary = np.empty((2, 2), dtype = np.complex128) #",
":]) time_sample = time_fine + time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)",
"See `Supported Python features`_ for compilable python features, and `Supported Numpy features`_ for",
"operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0] result[2, 0] = operator[2, 0]*operator[0, 0]",
"self.spin_calculator = spin_calculator def __getattr__(self, attr_name): if attr_name == \"spin\": spin = self.spin_calculator(self.state)",
"\\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z -",
"dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :],",
"of the exponentiation is to be written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates",
"the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int` The size of",
"max_registers = 63): \"\"\" Compiles the integrator and spin calculation functions of the",
"two level systems. \"\"\" ONE = (1, 3, \"one\") \"\"\" For three level",
"# sy = math.sin(y) # cisz = math.cos(z) + 1j*math.sin(z) # result[0, 0]",
"for chosen device on object constrution. Parameters: * **sweep_parameter** (:obj:`float`) - The input",
"- iy J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y",
"def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :],",
"value increases GPU occupancy, meaning more threads run concurrently, at the expense of",
"The complex number to take the absolute value of. Returns * **az** (:class:`numpy.float64`)",
"2 # For every fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter,",
"An array to write the resultant adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates",
"3 sample_index_end = 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end =",
"result[1, 0] = (cx*sy -1j*sx*cy)/cisz # result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz #",
"state. Must be a whole number multiple of `time_step_integration`. Measured in s. *",
"field_sample[3]/precision # cx = math.cos(x) # sx = math.sin(x) # cy = math.cos(y)",
"The target device that the integrator is being compiled for. .. _Supported Python",
"# Initialise time evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0",
"in the z direction. Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) -",
"calculation for each coarse timestep in parallel time_index = cuda.grid(1) if time_index <",
"exponentiation_method : :obj:`ExponentiationMethod` Which method to use for matrix exponentiation in the integration",
"(1.5 + sqrt3)/6 w1 = (1.5 - sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0]",
"matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can",
"magnetic_quantum_number)) - The quantum state of the spin system over time, written in",
"more details. get_time_evolution_raw : :obj:`callable` The internal function for evaluating the time evolution",
"block (workgroup), in terms of the number of threads (workitems) they each contain,",
"the integrator. Attributes ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that",
"each time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times",
"True, inline = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return cuda.jit(template,",
"& 0 & 0 \\\\\\\\ 0 & 0 & 0 \\\\\\\\ 0 &",
"return roc.jit(device = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return roc.jit(template,",
"2: # temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) # temporary =",
"dimension + 1 # utility_set = spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number ==",
"frame, for each time sampled. Units of :math:`\\\\hbar`. This is an output, so",
"the whole GPU, for each specific GPU model. This means that if more",
":obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. exponentiation_method : :obj:`ExponentiationMethod`",
"to the exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame:",
"the object. Attributes ---------- conj(z) : :obj:`callable` Conjugate of a complex number. ..",
"being, .. math:: \\\\begin{align*} A &= -i(x J_x + y J_y + z",
"1: # temporary = cuda.local.array((2, 2), dtype = np.complex128) # elif device_index ==",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply by. *",
"precision = 4**hyper_cube_amount # x = field_sample[0]/precision # y = field_sample[1]/precision # z",
"1: # Run calculation for each coarse timestep in parallel time_index = cuda.grid(1)",
"/= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index,",
"= left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator, result): result[0,",
"time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index] # Initialise time evolution operator to 1",
"def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0:",
"blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time,",
"cuda device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device ==",
"an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x",
"# Declare variables if device_index == 0: time_evolution_fine = np.empty((dimension, dimension), dtype =",
"Parameters ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state` was",
"try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error:",
":obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device` The option to select which device will",
"* **simulation_index** (:obj:`int`) - a parameter that can be swept over when multiple",
"the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to run on an AMD",
"spin one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t)",
"+ left[1, 2]*right[2, 1] result[2, 1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1,",
"**For spin half systems:** Assumes the exponent is an imaginary linear combination of",
"except: print(\"\\033[31mspinsim error: numba.roc could not jit get_field function into a roc device",
"J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X",
"rotating frame, using the rating wave approximation: just define `get_field()` with field functions",
"hilbert space the states with this spin belong to. label : :obj:`str` A",
"take the absolute value of. Returns * **az** (:class:`numpy.float64`) - The absolute value",
"used for archiving. index : :obj:`int` A reference number, used when compiling the",
"result[1, 1] = cisz*cx*cy # result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2 # cisz",
"= 1 + 1j*ez # eq = field_sample[3]/(6*precision) # eq = 1 +",
"\"\"\" For three level systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options for describing which",
"+ is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &=",
"obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values",
"and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use an analytic exponentiation method outside",
"rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index,",
"2, 2), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in",
"for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so",
"2]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) result[2, 1]",
"w1*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1,",
"1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0 if use_rotating_frame: time_sample = time_coarse[time_index] +",
"&= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) -",
"python features, and `Supported Numpy features`_ for compilable numpy features. \"\"\" CUDA =",
"time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers =",
"the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run on an Nvidia",
"matrix_multiply(left, right, result): result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0]",
"+ operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1] result[2, 1] = operator[2, 0]*operator[0,",
"theorem. \"\"\" class Device(Enum): \"\"\" The target device that the integrator is being",
"norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device def inner(left, right):",
"state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin wavefunction) of the system at",
":obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying might be able to decrease execution time",
"left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1] result[0, 2]",
"return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu_single\": def jit_host(template, max_registers): def",
"\\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes the exponent is",
"describes the field that the spin system is being put under. It must",
"conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result): x = field_sample[0] y = field_sample[1] z",
"float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0,",
"in range(state.shape[0]): # State = time evolution * previous state for x_index in",
"Defaults to 63 (optimal for GTX1070, the device used for testing. Note that",
"attribute called {}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The",
"device used for testing. Note that one extra register per thread is always",
"True, inline = True)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"roc\":",
"than it has Cuda cores, meaning some cores are inactive, and the GPU",
"technique used to get approximate analytic solutions of spin system dynamics. This is",
"device.jit_device device_index = device.index @jit_device def conj(z): return (z.real - 1j*z.imag) @jit_device def",
"0]) result[1, 0] = conj(operator[0, 1]) result[2, 0] = conj(operator[0, 2]) result[0, 1]",
"1) \"\"\" Approximation using the Lie Trotter theorem. \"\"\" class Device(Enum): \"\"\" The",
"`sweep_parameter`. time_start : :obj:`float` The time offset that the experiment is to start",
"1] + left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2",
"0 operator[2, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 0",
"for each time sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The",
"operator[1, 2] = 0 operator[2, 2] = 0 @jit_device def matrix_multiply(left, right, result):",
".. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix}",
"eigenstates of the spin projection operator in the z direction. spin_calculator : :obj:`callable`",
"the additive identity, ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*}",
"y = field_sample[1] z = field_sample[2] r = math.sqrt(x**2 + y**2 + z**2)",
"\"\"\" The implementation to use for matrix exponentiation within the integrator. Parameters ----------",
"\\\\\\\\ 0 & 0 & -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0",
"if attr_name == \"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin raise",
"\\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with",
"eq = math.cos(eq) + 1j*math.sin(eq) result[0, 0] = Ca/ez - 1 result[1, 0]",
"1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] = sa*eq*ep/ez result[2, 0]",
"+ (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return",
"size of each thread block (workgroup), in terms of the number of threads",
"|a + ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters:",
"numpy features. \"\"\" CUDA = (\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler",
"a - ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) -",
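Taken together, these scalar helpers reduce to one-liners outside the jit context. A minimal sketch, assuming plain numpy complex scalars, mirroring the conj, complex_abs and norm2 fragments above:

import math

def conj(z):
    # Complex conjugate: a + ib -> a - ib.
    return z.real - 1j*z.imag

def complex_abs(z):
    # Absolute value: |a + ib| = sqrt(a**2 + b**2).
    return math.sqrt(z.real**2 + z.imag**2)

def norm2(z):
    # 2-norm of a two-component complex vector (the spin-half case).
    return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2)

assert conj(3 + 4j) == 3 - 4j
assert complex_abs(3 + 4j) == 5.0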
"IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse,",
"cores, in parallel. .. note :: To use this device option, the user",
"note:: This function must be compilable for the device that the integrator is",
"squares to make to the approximate matrix (:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number,",
":obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state of the spin system, written",
"s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y",
"is being compiled for. See :class:`Device` for more information and links. spin_quantum_number :",
"try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could",
"0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1] @jit_device def",
"\\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & -2",
"is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all",
"0 \\\\\\\\ 1 & 0 & 1 \\\\\\\\ 0 & 1 & 0",
"state. This :obj:`callable` is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and",
"jit_host(func): return nb.njit(template, parallel = True)(func) return jit_host self.jit_host = jit_host def jit_device(func):",
"time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to the existing",
"(:class:`numpy.complex128`) - The complex number to take the absolute value of. Returns *",
"Ca = math.cos(a/2) Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez = math.cos(ez) +",
"0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0]",
"0) \"\"\" Use pure python interpreted code for the integrator, ie, don't compile",
"must run fewer threads concurrently than it has Cuda cores, meaning some cores",
"times, each time varying `sweep_parameter`. time_start : :obj:`float` The time offset that the",
"result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount",
"= jit_device_template PYTHON = (\"python\", 0) \"\"\" Use pure python interpreted code for",
"of registers allocated per thread when using :obj:`Device.CUDA` as the target device, and",
"+ 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - rotating_wave",
"and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details.",
"system it is being run on is Nvidia Cuda compatible, and defaults to",
"for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported",
"# elif device_index == 1: # temporary = cuda.local.array((3, 3), dtype = np.complex128)",
"dimension > 2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0]",
":obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector) over",
"0 operator[1, 2] = 0 operator[2, 2] = 1 @jit_device def set_to_zero(operator): operator[0,",
"return transform_frame = transform_frame_lab get_field_jit = jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64,",
"sample_index_max, lie_dimension), dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block,",
"`max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads vs faster",
"systems. Assumes the exponent is an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being, ..",
"- q/3) + 1j*math.sin(z - q/3) # result[0, 2] = 0.5*cisz*(cx - cy",
"&= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray`",
"math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if",
"0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2) else: spin[time_index, 0]",
"1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1] result[2, 1] = left[2,",
"= conj(operator[2, 0]) result[1, 2] = conj(operator[2, 1]) result[2, 2] = conj(operator[2, 2])",
"get_field : :obj:`callable` A python function that describes the field that the spin",
"0] result[2, 0] = operator[2, 0] result[0, 1] = operator[0, 1] result[1, 1]",
"2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T`",
"sy = math.sin(y) # cisz = math.cos(z) + 1j*math.sin(z) # result[0, 0] =",
"the experiment is to finish at. Measured in s. * **time_step_integration** (:obj:`float`) -",
"- cy - 1j*sx*sy) # cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1]",
"state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index,",
"\"\"\" Integration method from AtomicPy. Makes two Euler integration steps, one sampling the",
"= 0 operator[2, 2] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0",
"Assumes the exponent is an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being, .. math::",
"series of a quantum state. This :obj:`callable` is passed to the :obj:`Results` object",
"2] + (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2] result[2, 2]",
"= operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2,",
".. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can be",
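The source unrolls this element-by-element for the 2x2 and 3x3 cases; a loop sketch of the same map (equivalent to operator.conj().T in plain numpy, using conj as defined above):

def adjoint(operator, result):
    # Hermitian adjoint: result[i, j] = conj(operator[j, i]).
    for y_index in range(operator.shape[0]):
        for x_index in range(operator.shape[1]):
            result[y_index, x_index] = conj(operator[x_index, y_index])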
"= 1 result[1, 0] = 0 result[0, 1] = 0 result[1, 1] =",
"jit_device def jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device = True, inline = True)(func)",
"= cuda.grid(1) elif device_index == 1: time_index = roc.get_global_id(1) if time_index < spin.shape[0]:",
"(2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2] result[2, 2] = operator[2,",
"trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0: # hyper_cube_amount =",
"append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave,",
":], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2",
"\"\"\" Calculates the expected spin projection (Bloch vector) over time for a given",
"projection (Bloch vector) over time. \"\"\" def __init__(self, time, time_evolution, state, spin_calculator): \"\"\"",
"left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2] result[1, 2]",
"time step. See :ref:`architecture` for some information. spin_calculator : :obj:`callable` Calculates the expected",
"iy J_y - iz J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy",
"x = field_sample[0] y = field_sample[1] z = field_sample[2] r = math.sqrt(x**2 +",
"0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] result[0, 1] = left[0,",
"4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end = 4 elif integration_method",
"0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse",
"- state[time_index, 1].real**2 - state[time_index, 1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0]",
"to take the absolute value of. Returns * **az** (:class:`numpy.float64`) - The absolute",
"if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:,",
"squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int`",
"vector to right multiply in the inner product. Returns * **d** (:class:`numpy.complex128`) -",
"def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0]",
".. note :: The use of a rotating frame is commonly associated with",
"units of s. time_step_integration : :obj:`float` The time step used within the integration",
"&= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &=",
"control, so really this number is 64). Raising this value allocates more registers",
"of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more threads vs",
"performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" utilities",
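For concreteness, a minimal numba.cuda sketch of the two tuning knobs discussed here: max_registers caps register allocation on the host kernel, and threads_per_block sets the launch configuration. The kernel and array are illustrative only:

import numpy as np
from numba import cuda

threads_per_block = 64
max_registers = 63

@cuda.jit(device=True, inline=True)
def scale(value):
    # Trivial device function, compiled inline into its caller.
    return 2.0*value

@cuda.jit(max_registers=max_registers)
def kernel(data):
    index = cuda.grid(1)
    if index < data.size:
        data[index] = scale(data[index])

data = cuda.to_device(np.ones(1 << 20))
blocks_per_grid = (data.size + (threads_per_block - 1)) // threads_per_block
kernel[blocks_per_grid, threads_per_block](data)
result = data.copy_to_host()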
"(y_index, x_index)) - The matrix to right multiply by. * **result** (:class:`numpy.ndarray` of",
":func:`numba.jit()` LLVM compiler to compile the integrator to run on a single CPU",
"- 1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample,",
"temporary = cuda.local.array((3, 3), dtype = np.complex128) elif device_index == 2: temporary_group =",
"= 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2) else:",
"0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] + operator[1,",
"sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3",
"0] = (cx*cy - 1j*sx*sy)/cisz # result[1, 0] = (cx*sy -1j*sx*cy)/cisz # result[0,",
"during the simulations that `spinsim` was designed for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`",
"& 1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i",
"float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0,",
"* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y",
"= roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] #",
"option to select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or",
"0 operator[2, 2] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0] =",
"|\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix}",
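A plain-numpy sketch of these two maps over a whole time series (the compiled get_spin kernels perform the same arithmetic, one time index per thread):

import numpy as np

def bloch_vector_half(state):
    # state: (time_index, 2) complex array of (psi_+1/2, psi_-1/2).
    prod = state[:, 0]*np.conj(state[:, 1])
    spin = np.empty((state.shape[0], 3), np.float64)
    spin[:, 0] = prod.real
    spin[:, 1] = -prod.imag
    spin[:, 2] = 0.5*(np.abs(state[:, 0])**2 - np.abs(state[:, 1])**2)
    return spin

def bloch_vector_one(state):
    # state: (time_index, 3) complex array of (psi_+1, psi_0, psi_-1).
    spin = np.empty((state.shape[0], 3), np.float64)
    spin[:, 0] = (2*np.conj(state[:, 1])*(state[:, 0] + state[:, 2])/np.sqrt(2)).real
    spin[:, 1] = (2j*np.conj(state[:, 1])*(state[:, 0] - state[:, 2])/np.sqrt(2)).real
    spin[:, 2] = np.abs(state[:, 0])**2 - np.abs(state[:, 2])**2
    return spin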
"(\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the integrator to",
"-1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be calculated as .. math:: \\\\begin{align*}",
"(time_index, spatial_direction) The expected spin projection (Bloch vector) over time. \"\"\" if device.index",
"2] + operator[0, 2]*operator[2, 2] result[1, 2] = operator[1, 0]*operator[0, 2] + (2",
"- 1, x_index, z_index]*state[time_index - 1, z_index] else: state[time_index, x_index] += state_init[x_index] sqrt2",
"operator[0, 1] = 0 operator[1, 1] = 0 @jit_device def matrix_multiply(left, right, result):",
"cuda.local.array((2, 2), dtype = np.complex128) # elif device_index == 2: # temporary_group =",
"c - 1j*z*s result[1, 0] = (y - 1j*x)*s result[0, 1] = -(y",
"0 & 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i &",
"\\\\end{align*} with .. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &=",
"1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame",
"rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)",
"time_evolution_coarse.copy_to_host() time = time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results",
"time. Parameters ---------- sweep_parameter : :obj:`float` The input to the `get_field` function supplied",
"of the exponentiation is to be written to. * **trotter_cutoff** (:obj:`int`) - The",
"of z. complex_abs(z) : :obj:`callable` The absolute value of a complex number. ..",
"&= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated,",
"needed. Compiled for chosen device on object construction. Parameters: * **state** (:obj:`numpy.ndarray` of",
"vector) over time. \"\"\" self.time = time self.time_evolution = time_evolution self.state = state",
"1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 -",
"utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device = device.jit_device",
"numba import roc import math sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum):",
":, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1),",
"1] + w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if",
"number for the whole GPU, for each specific GPU model. This means that",
"lie_dimension), dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index ==",
"time_index = cuda.grid(1) elif device_index == 1: time_index = roc.get_global_id(1) if time_index <",
"= operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2,",
"spin one systems. Assumes the exponent is an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`,",
"Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid =",
"on all CPU cores, in parallel. .. note :: To use this device",
"Parameters: * **z** (:class:`numpy.complex128`) - The complex number to take the conjugate of.",
"dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0]",
":ref:`architecture` for some information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated",
"over time, written in terms of the eigenstates of the spin projection operator",
"left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1] result[2, 1]",
"spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba could",
"which increases the accuracy of the output since the integrator will on average",
"\\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} &",
"The matrix which the result of the exponentiation is to be written to.",
"time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max",
"nb.prange(state.shape[1]): state[time_index, x_index] = 0 if time_index > 0: for z_index in range(state.shape[1]):",
"= \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes the exponent",
"# sy = math.sin(y) # cisz = math.cos(z + q/3) - 1j*math.sin(z +",
"value of the spin quantum number. dimension : :obj:`int` Dimension of the Hilbert",
"== 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1),",
"\\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex number to take the",
"= (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2]",
"vs faster running threads, and changing this value could increase performance for your",
"spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device` The",
"the (possibly large) z component of the field, which increases the accuracy of",
"rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:, :,",
"2] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[2,",
"# eq = field_sample[3]/(6*precision) # eq = 1 + 1j*eq result[0, 0] =",
"time_evolution_coarse): if device_index == 0: time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128) elif",
"result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] +",
"3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] =",
"not done when this option is set to :obj:`True` - no such approximations",
"1j*math.sin(z) # result[0, 0] = (cx*cy - 1j*sx*sy)/cisz # result[1, 0] = (cx*sy",
"be interpreted. \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index =",
"field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2]",
"can be approximated as, for large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix",
"field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if dimension > 2: field_sample[2,",
"0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1] result[1, 1] =",
"time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:],",
"1j*sx*cy)*cisz # result[1, 1] = (cx*cy + 1j*sx*sy)*cisz # if device_index == 0:",
"cuda.jit(template, debug = False, max_registers = max_registers)(func) return jit_host self.jit_host = jit_host def",
"rotating in the z axis by an amount defined by the field in",
":obj:`int` Dimension of the Hilbert space the states with this spin belong to.",
"the result of the exponentiation is to be written to. * **trotter_cutoff** (:obj:`int`)",
"float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] =",
"&\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex number to take",
"operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0] result[1, 0]",
"= cuda.local.array((2, 2), dtype = np.complex128) # elif device_index == 2: # temporary_group",
"of a subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x",
"evolution operator between each time step. See :ref:`architecture` for some information. spin_calculator :",
"# matrix_multiply(temporary, temporary, result) else: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 +",
"2]*right[2, 2] result[2, 2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] +",
"& 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0",
"operator[0, 0] = 1 operator[1, 0] = 0 operator[0, 1] = 0 operator[1,",
"= (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2,",
"def jit_device(func): return roc.jit(device = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func):",
"the field in the z direction. This removes the (possibly large) z component",
"matrix_multiply(left, right, result) : :obj:`callable` Multiply matrices left and right together, to be",
"field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0",
"matrix_exponential_analytic(field_sample, result): x = field_sample[0] y = field_sample[1] z = field_sample[2] r =",
"the :func:`numba.jit()` LLVM compiler to compile the integrator to run on all CPU",
"+ B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes",
"== 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply",
":obj:`SpinQuantumNumber` The option to select whether the simulator will integrate a spin half",
"= True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device =",
"Lowering the value increases GPU occupancy, meaning more threads run concurrently, at the",
"float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave,",
"1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1,",
"rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2]",
"0 & 1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 &",
"IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers = 63): \"\"\" .. _Achieved",
"rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1]",
"which the result of the exponentiation is to be written to. matrix_exponential_lie_trotter(field_sample, result)",
"multiple simulations need to be run. For example, it is used to sweep",
"max_registers)(func) return jit_host self.jit_host = jit_host def jit_device(func): return cuda.jit(device = True, inline",
"result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] +",
"eq = field_sample[3]/(6*precision) # eq = 1 + 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez))",
"the start of the simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The",
"+ 1j*math.sin(z) # result[0, 0] = (cx*cy - 1j*sx*sy)/cisz # result[1, 0] =",
"= operator[2, 2] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] =",
": :obj:`float` The numerical value of the spin quantum number. dimension : :obj:`int`",
"squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index,",
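The squaring stage in isolation, as a short sketch: once :math:`T \approx \exp(2^{-\tau}A)` is built from the closed form, :math:`\tau` repeated squarings recover :math:`\exp(A)`:

def square_tau_times(T, tau):
    # Repeated squaring: T -> T**(2**tau), so exp(2**-tau * A) -> exp(A).
    for _ in range(tau):
        T = T @ T
    return T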
"# temporary = temporary_group[roc.get_local_id(1), :, :] # for power_index in range(hyper_cube_amount): # matrix_multiply(result,",
"= 1 + 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] =",
"\"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj complex_abs = utilities.complex_abs norm2",
"elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) temporary",
"time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the exponential if exponentiation_method_index == 0:",
"\\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The",
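The inner product as written in the spin-one utilities, conjugating the left argument (equivalent to numpy.vdot(left, right)):

def inner(left, right):
    # Maths-convention inner product over three components.
    return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2]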
"- iy J_y - iz J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x -",
"of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end],",
"product. adjoint(operator) : :obj:`callable` Takes the hermitian adjoint of a matrix. .. math::",
"`Supported CUDA Python features`_ for compilable python features. \"\"\" ROC = (\"roc\", 2)",
"more registers are allocated than are available for the GPU model, the GPU",
"spin projection (Bloch vector) over time. \"\"\" if device.index == 0: spin =",
"(Bloch vector) over time. \"\"\" if device.index == 0: spin = np.empty((state.shape[0], 3),",
"The implementation to use for matrix exponentiation within the integrator. Parameters ---------- value",
"= operator[0, 1] result[1, 1] = operator[1, 1] result[2, 1] = operator[2, 1]",
"Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to run on an",
"if use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding):",
"s. time_end : :obj:`float` The time that the experiment is to finish at.",
"time (0) or end time (1)) The time values for when the experiment",
"system in the lab frame, for each time sampled. time_evolution : :class:`numpy.ndarray` of",
"utilities.set_to set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint",
"= math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :],",
"use the rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the",
"two orthogonal vectors is 0. .. math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle",
"archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free, fourth order Magnus based integrator.",
"jit get_field function into a roc device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time",
"\\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y -",
"1 a = a/precision Ca = math.cos(a/2) Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision)",
"def jit_host(template, max_registers): def jit_host(func): return func return jit_host self.jit_host = jit_host def",
":]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse,",
"= np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results = Results(time, time_evolution_coarse, state, self.spin_calculator)",
":] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index] # Initialise time evolution",
"the field from the end of the time step. The equivalent of the",
"Device(Enum): \"\"\" The target device that the integrator is being compiled for. ..",
"= c - 1j*z*s result[1, 0] = (y - 1j*x)*s result[0, 1] =",
"integration time step. Measured in s. * **time_step_output** (:obj:`float`) - The sample resolution",
"time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0,",
"calculated just in time using the JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates",
"being put under. It must have three arguments: * **time_sample** (:obj:`float`) - the",
"exponent is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(2)`, being, ..",
"complex_abs self.norm2 = norm2 self.inner = inner self.set_to = set_to self.set_to_one = set_to_one",
"+ w0*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] +",
"out of a maximum number for the whole GPU, for each specific GPU",
"= dimension self.label = label HALF = (1/2, 2, \"half\") \"\"\" For two",
"= field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) # cx =",
"device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method =",
"set_to_one self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic = matrix_exponential_analytic",
"= state self.spin_calculator = spin_calculator def __getattr__(self, attr_name): if attr_name == \"spin\": spin",
"z direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection",
"execution time for different GPU models. device : :obj:`Device` The option to select",
"be used for archiving. \"\"\" def __init__(self, value, dimension, label): super().__init__() self._value_ =",
"get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 2: # Run",
"to find the quantum state timeseries of the 3 level atom. Parameters ----------",
"time evolution * previous state for x_index in nb.prange(state.shape[1]): state[time_index, x_index] = 0",
"for Nvidia's official explanation. \"\"\" if not device: if cuda.is_available(): device = Device.CUDA",
"1 result[1, 1] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): #",
"append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1,",
"2] + left[0, 2]*right[2, 2] result[1, 2] = left[1, 0]*right[0, 2] + left[1,",
"the GPU model, the GPU must run fewer threads concurrently than it has",
"def jit_device_template(func): return roc.jit(template, device = True)(func) return jit_device_template self.jit_device_template = jit_device_template PYTHON",
"(magnetic_quantum_number) The initial quantum state of the spin system, written in terms of",
"1j*cy*sx)/sqrt2 # cisz = math.cos(z - q/3) + 1j*math.sin(z - q/3) # result[0,",
"-\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one: .. math:: \\\\begin{align*} \\\\langle",
"state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index] else: state[time_index,",
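The same propagation in plain numpy, for reference (the compiled get_state unrolls the matrix-vector product and parallelises over time with nb.prange):

import numpy as np

def get_state(state_init, state, time_evolution):
    # psi[0] = psi_init; psi[t] = U[t - 1] @ psi[t - 1].
    state[0] = state_init
    for time_index in range(1, state.shape[0]):
        state[time_index] = time_evolution[time_index - 1] @ state[time_index - 1]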
"Approximation using the Lie Trotter theorem. \"\"\" class Device(Enum): \"\"\" The target device",
"1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[0,",
"= sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index == 0: temporary",
"operator. Parameters ---------- sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A",
"@jit_device def conj(z): return (z.real - 1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2 +",
"True)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"roc\": def jit_host(template, max_registers):",
"frame, for each time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of",
"roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return",
"X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] =",
"Make a matrix the multiplicative identity, ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i, j}",
"def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[2, 0] =",
"exponential can be approximated as, for large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &=",
"= get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64,",
"The complex number to take the conjugate of. Returns * **cz** (:class:`numpy.complex128`) -",
"field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample)",
"= math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1,",
"set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[0, 1] = 0",
":class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of the spin system in the lab",
"z[2].real**2 + z[2].imag**2) @jit_device def cross(left, right, result): result[0] = conj(left[1]*right[2] - left[2]*right[1])",
":obj:`Device` for all options and more details. get_time_evolution_raw : :obj:`callable` The internal function",
"\\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin one systems** Assumes",
"identity, ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*} Parameters: *",
"the integrator moves into a frame rotating in the z axis by an",
"result of the exponentiation is to be written to. * **trotter_cutoff** (:obj:`int`) -",
"2]*right[2, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] +",
"ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\nAttempting to use an analytic exponentiation method",
"2].real**2 - state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\" Calculates the expected spin projection",
"0: # temporary = np.empty((2, 2), dtype = np.complex128) # elif device_index ==",
"left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0] result[0, 1] = left[0, 0]*right[0, 1]",
"try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could",
"this value allocates more registers (fast memory) to each thread, out of a",
"based on the Lie Product Formula, .. math:: \\\\exp(A + B) = \\\\lim_{c",
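A quick numerical check of the Lie product formula, as a standalone scipy sketch (not part of the compiled integrator):

import numpy as np
from scipy.linalg import expm

# Two non-commuting anti-Hermitian generators, -i*sigma_x and -i*sigma_z.
A = -1j*np.array([[0, 1], [1, 0]], np.complex128)
B = -1j*np.array([[1, 0], [0, -1]], np.complex128)

c = 1 << 10
trotter = np.linalg.matrix_power(expm(A/c) @ expm(B/c), c)
print(np.max(np.abs(trotter - expm(A + B))))  # error shrinks as O(1/c)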
"which device will be targeted for integration. That is, whether the integrator is",
"# result[0, 0] = (cx*cy - 1j*sx*sy)/cisz # result[1, 0] = (cx*sy -1j*sx*cy)/cisz",
"1] = operator[0, 1] result[1, 1] = operator[1, 1] result[2, 1] = operator[2,",
"math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0: # hyper_cube_amount = 0 # precision =",
"== 0: temporary = np.empty((2, 2), dtype = np.complex128) elif device_index == 1:",
"**nz** (:class:`numpy.float64`) - The 2 norm of z. inner(left, right) : :obj:`callable` The",
"= 0 @jit_device def matrix_multiply(left, right, result): result[0, 0] = left[0, 0]*right[0, 0]",
"\\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one: .. math::",
"result) result[0, 0] += 1 result[1, 1] += 1 result[2, 2] += 1",
"elif device == Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] +",
"time = roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size",
"cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding",
"compatible GPU, in parallel. .. note :: To use this device option, the",
"cuda.is_available(): device = Device.CUDA else: device = Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number =",
":class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time (1)) The time values",
":class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end",
"in parallel. Compiled for chosen device on object construction. Parameters: * **sweep_parameter** (:obj:`float`)",
"threads_per_block : :obj:`int` The size of each thread block (workgroup), in terms of",
"operator[0, 0] = 0 operator[1, 0] = 0 operator[2, 0] = 0 operator[0,",
"+ conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0]",
"Calculate the exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1:",
"time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0, 2] if dimension == 2:",
"right, result) : :obj:`callable` Multiply matrices left and right together, to be returned",
"temporary = np.empty((3, 3), dtype = np.complex128) elif device_index == 1: temporary =",
"= (\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the integrator",
"conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0])",
"\"\"\" A on object that contains definitions of all of the device functions",
"integrator is being compiled for. See :class:`Device` for more information and links. spin_quantum_number",
":], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration,",
"elif device_index == 1: time_index = roc.get_global_id(1) if time_index < spin.shape[0]: if dimension",
"& 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0",
"timesteps, for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output,",
"a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system it is being",
"of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER =",
":ref:`overview_of_simulation_method`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or",
"\\\\sin(\\\\theta). \\\\end{align*} **For spin one systems** Assumes the exponent is an imaginary linear",
"a/2 # ca = 1 # sa = -1j*a/sqrt2 # ez = field_sample[2]/(2*precision)",
"ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z**",
"Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features:",
"device option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported",
"rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample)",
"0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0] result[1, 0] =",
"time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :]",
"model. Defaults to 63 (optimal for GTX1070, the device used for testing). Note",
"get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:,",
"1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] result[1, 1]",
"approximations are made, and the output state is given out of the rotating",
"0] + left[2, 2]*right[2, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0,",
"The 2 norm of z. inner(left, right) : :obj:`callable` The inner (maths convention",
"of the quadratic shift (only appearing, and required, in spin one systems). ..",
"\"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin raise AttributeError(\"{} has no",
"result[0, 0] += 1 result[1, 1] += 1 result[2, 2] += 1 #",
"if device_index == 0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points,",
"\\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y)",
"= 32, threads_per_block = 64, max_registers = 63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm",
"be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"Sa = math.sin(a/2) ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez",
"2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2] result[2, 2] = left[2,",
":obj:`callable` The 2 norm of a complex vector. .. math:: \\|a + ib\\|_2",
"0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1] result[0,",
"J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\",
"time if the `spin` property is needed. Compiled for chosen device on object",
":math:`\\\\hbar`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or",
"== \"python\": def jit_host(template, max_registers): def jit_host(func): return func return jit_host self.jit_host =",
"right, result): result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2]",
"defined by the field in the z direction. This removes the (possibly large)",
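One of the transform_frame variants in full, as it appears for spin one in the rotating frame: divide the transverse components by the winding phase, then subtract the rotating-wave frequency from the z component:

def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding):
    # Rotate (x, y) into the frame winding about z at rotating_wave Hz,
    # then remove that frequency from the z component of the field.
    X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding
    field_sample[0] = X.real
    field_sample[1] = X.imag
    field_sample[2] = field_sample[2] - rotating_wave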
"quantum state of the spin system, written in terms of the eigenstates of",
"ket_state_index) The evaluated time evolution operator between each time step. See :ref:`architecture` for",
"== 0: temporary = np.empty((3, 3), dtype = np.complex128) elif device_index == 1:",
"return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2",
"and next timesteps, for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is",
"blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin",
"1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount",
"np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128)",
"x = field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) # cx",
"0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1] result[2, 1]",
"dtype = np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 2,",
"field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable",
"= utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter =",
"to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details.",
"* **trotter_cutoff** (:obj:`int`) - The number of squares to make to the approximate",
"0] = Ca/ez - 1 result[1, 0] = Sa*ep result[0, 1] = Sa/ep",
"1] = 0 operator[1, 1] = 1 operator[2, 1] = 0 operator[0, 2]",
"the integrator to run on a single CPU core. .. note :: To",
"1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to the existing time evolution operator set_to(time_evolution_coarse,",
"z direction. Returns ------- results : :obj:`Results` An object containing the results of",
"q = field_sample[3]/precision # cx = math.cos(x) # sx = math.sin(x) # cy",
"**left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to left multiply in the",
"# def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount <",
"\\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i & 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1",
"frame, for each time sampled. time_evolution : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index)",
"2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index, 2] =",
"Sa*ep result[0, 1] = Sa/ep result[1, 1] = Ca*ez - 1 if device_index",
"0] + w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2,",
"The 2 norm of a complex vector. .. math:: \\|a + ib\\|_2 =",
"\"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration method. \"\"\" HALF_STEP = \"half_step\" \"\"\"",
"system over time, written in terms of the eigenstates of the spin projection",
"used to get approximate analytic solutions of spin system dynamics. This is not",
"- 1 result[1, 0] = sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] =",
"complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine +",
"+ operator[0, 2]*operator[2, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2 +",
"- 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return spin self.get_time_evolution_raw",
"result[1, 1] = Ca*ez - 1 if device_index == 0: temporary = np.empty((2,",
"transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2",
"of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0]",
":obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod`",
"spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index,",
"1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result):",
"- left[1]*right[0]) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device",
":obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. threads_per_block : :obj:`int`",
"of x, y and z respectively, as described above. * **result** (:class:`numpy.ndarray` of",
"**right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply by.",
"the JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch",
"for use with spin half systems. Will not work with spin one systems.",
"ROCm compatible GPU, in parallel. .. warning :: Work in progress, not currently",
"float64, float64[:], complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse,",
"= utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host =",
"result[1, 1] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount",
"= np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration,",
"matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device = device.jit_device jit_device_template = device.jit_device_template device_index",
"Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin one",
"for the GPU model, the GPU must run fewer threads concurrently than it",
"time_evolution_coarse): \"\"\" Find the stepwise time evolution opperator. Parameters ---------- sweep_parameter : :obj:`float`",
"registers being available to each thread, meaning slower memory must be used. Thus,",
"the exponential can be approximated as, for large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A)",
"of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block :",
"& 0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be",
"wave approximation in the rotating frame. integration_method : :obj:`IntegrationMethod` Which integration method to",
"= field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq) # Ca = 1 # Sa",
"half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device` The option",
"&= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) -",
"# temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1),",
"0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0,",
"over time. \"\"\" self.time = time self.time_evolution = time_evolution self.state = state self.spin_calculator",
"1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - rotating_wave transform_frame",
"ROC = (\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()` LLVM compiler to compile the",
"MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free, fourth order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE",
"\\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix}",
"result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 +",
"a whole number multiple of `time_step_integration`. Measured in s. state_init : :obj:`numpy.ndarray` of",
"rate of the outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index,",
"rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:,",
"+ 1j*ez # eq = field_sample[3]/(6*precision) # eq = 1 + 1j*eq result[0,",
"`state` was evaluated at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, y_index, x_index) The",
"for GTX1070, the device used for testing). Note that one extra register per",
":math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so use an empty :class:`numpy.ndarray` with",
"= np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary)",
"J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\",
"\\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) +",
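A standalone numpy transcription of this closed form (the compiled matrix_exponential_analytic writes into a preallocated result rather than returning):

import math
import numpy as np

def matrix_exponential_analytic_half(x, y, z):
    # exp(-i(x J_x + y J_y + z J_z)) for spin half, with J = sigma/2.
    r = math.sqrt(x**2 + y**2 + z**2)
    s = math.sin(r/2)/r if r > 0 else 0.5  # limit of sin(r/2)/r as r -> 0
    c = math.cos(r/2)
    return np.array([[c - 1j*z*s, -(y + 1j*x)*s],
                     [(y - 1j*x)*s, c + 1j*z*s]], np.complex128)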
"+ operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2] result[1,",
"norm of a complex vector. .. math:: \\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i",
"float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave,",
"z direction. Returns: * **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected",
"1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] +",
"experiments, without the need for slow recompilation. For example, if the `sweep_parameter` is",
"select which device will be targeted for integration. That is, whether the integrator",
"else: device = Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number self.device = device",
"-2 & 0 \\\\\\\\ 0 & 0 & 1 \\\\end{pmatrix} \\\\end{align*} Then the",
"0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0] result[0,",
"math.cos(z) + 1j*math.sin(z) # result[0, 0] = (cx*cy - 1j*sx*sy)/cisz # result[1, 0]",
"integrator. Parameters ---------- value : :obj:`str` A text label that can be used",
"temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :]",
"threads_per_block self.spin_quantum_number = spin_quantum_number self.device = device self.get_time_evolution_raw = None self.get_spin_raw = None",
":math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x + y J_y +",
"(Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] = sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1]",
"else: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 +",
"to sweep over dressing frequencies during the simulations that `spinsim` was designed for.",
"the integrator. Parameters ---------- value : :obj:`str` A text label that can be",
"or end time (1)) The time values for when the experiment is to",
"= max_registers) def get_spin(state, spin): \"\"\" Calculate each expected spin value in parallel.",
"python interpreted code for the integrator, ie, don't compile the integrator. \"\"\" CPU_SINGLE",
"rotating_wave = field_sample[0, 2] if dimension == 2: rotating_wave /= 2 # For",
"the field. This is a four dimensional vector, with the first three entries",
"running threads, and changing this value could increase performance for your GPU. See",
"contains definitions of all of the device functions (functions compiled for use on",
"that the time evolution operator is found for. In units of s. This",
"(time_index, bra_state_index, ket_state_index) Time evolution operator (matrix) between the current and next timesteps,",
"operator[1, 1] = 1 operator[2, 1] = 0 operator[0, 2] = 0 operator[1,",
"0])*operator[0, 1] + operator[0, 1]*operator[1, 1] result[1, 1] = operator[1, 0]*operator[0, 1] +",
"array to write the resultant adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a",
"sweep_parameter): # Declare variables if device_index == 0: time_evolution_fine = np.empty((dimension, dimension), dtype",
"= -(cx*sy + 1j*sx*cy)*cisz # result[1, 1] = (cx*cy + 1j*sx*sy)*cisz # if",
"self.get_time_evolution_raw = None self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method,",
"= np.complex128) elif device_index == 1: time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128)",
"some cores are inactive, and the GPU is said to have less occupancy.",
"set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[0, 1] = 0",
"sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates the time dependent Schroedinger equation",
"y_index, x_index)) - The evaluated time evolution operator between each time step. See",
"device_index == 2: temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) temporary =",
"3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state),",
"running :mod:`spinsim`, balancing more threads vs faster running threads, and changing this value",
"the simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of",
"Lie Trotter theorem. \"\"\" class Device(Enum): \"\"\" The target device that the integrator",
"w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if dimension >",
"options and more details. exponentiation_method : :obj:`ExponentiationMethod` Which method to use for matrix",
"0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1] result[2, 1] =",
"= 1 a = a/precision Ca = math.cos(a/2) Sa = math.sin(a/2) ca =",
"result) else: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2",
"result[0, 0] = (cx*cy - 1j*sx*sy)/cisz # result[1, 0] = (cx*sy -1j*sx*cy)/cisz #",
"= time_evolution self.state = state self.spin_calculator = spin_calculator def __getattr__(self, attr_name): if attr_name",
"= cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension,",
"during the integration. Parameters ---------- value : :obj:`str` A text label that can",
"not jit get_field function into a device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number,",
"matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to the",
"to left multiply in the inner product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index))",
"\\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y",
":class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z respectively, as",
"\\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &=",
"and is executed there just in time if the `spin` property is needed.",
"linear combination of a subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &=",
"return cuda.jit(template, device = True, inline = True)(func) return jit_device_template self.jit_device_template = jit_device_template",
"1]*right[1, 2] + left[0, 2]*right[2, 2] result[1, 2] = left[1, 0]*right[0, 2] +",
"jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu\": def jit_host(template, max_registers): def jit_host(func):",
"math:: \\\\begin{align*} A &= -i(x J_x + y J_y + z J_z +",
"operator[1, 2] = 0 operator[2, 2] = 1 @jit_device def set_to_zero(operator): operator[0, 0]",
"device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_fine =",
".. math:: \\\\begin{align*} A &= -i(x J_x + y J_y + z J_z),",
"1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator, result): result[0,",
"\"half_step\" \"\"\" Integration method from AtomicPy. Makes two Euler integration steps, one sampling",
"exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension",
"ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2 + y^2",
"spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector) over time for",
"\\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: *",
":], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame: # Take out",
"number to take the conjugate of. Returns * **cz** (:class:`numpy.complex128`) - The conjugate",
"of the eigenstates of the spin projection operator in the z direction. spin",
"approximate matrix (:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters ----------",
"time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64) state_init =",
": :obj:`callable` The 2 norm of a complex vector. .. math:: \\|a +",
"time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution operator (matrix) between",
"of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to right multiply by. * **result**",
"increase execution time for different GPU models. max_registers : :obj:`int` The maximum number",
"three entries being x, y, z spatial directions (to model a magnetic field,",
"time_index > 0: for z_index in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index - 1,",
"a technique used to get approximate analytic solutions of spin system dynamics. This",
"1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1]",
"+ w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] +",
"r &= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index))",
"1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] result[1, 1] = left[1,",
"a quantum state. Parameters ---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The",
"0] = operator[2, 0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1,",
"device, threads_per_block) conj = utilities.conj complex_abs = utilities.complex_abs norm2 = utilities.norm2 inner =",
"the GPU must run fewer threads concurrently than it has Cuda cores, meaning",
"@jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount",
"result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] result[0,",
"time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) -",
"+ ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r",
"math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = time_fine",
"- i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2})",
"time_end : :obj:`float` The time that the experiment is to finish at. Measured",
"math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq",
"def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount =",
"&= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y",
"Measured in s. time_step_output : :obj:`float` The sample resolution of the output timeseries",
"that use the rotating wave approximation in the rotating frame. integration_method : :obj:`IntegrationMethod`",
"field_sample[2] r = math.sqrt(x**2 + y**2 + z**2) if r > 0: x",
"otherwise. See :obj:`Device` for all options and more details. exponentiation_method : :obj:`ExponentiationMethod` Which",
"Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*}",
"\\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z - iq J_q)\\\\\\\\",
":obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index) The evaluated time evolution operator between each",
"spin projection operator in the z direction. Returns ------- spin : :obj:`numpy.ndarray` of",
"1] + (2 + operator[2, 2])*operator[2, 1] result[0, 2] = (2 + operator[0,",
"parallel time_index = cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points,",
"z**2) if r > 0: x /= r y /= r z /=",
"raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device == Device.ROC: time =",
"empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray`",
": :obj:`callable` Calculates a matrix exponential based on the Lie Product Formula, ..",
"func return jit_host self.jit_host = jit_host def jit_device(func): return func self.jit_device = jit_device",
"multiply by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to",
"# Calculate the exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index ==",
"eq = field_sample[3]/(6*precision) # eq = math.cos(eq) + 1j*math.sin(eq) result[0, 0] = Ca/ez",
"(2 + operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0,",
"+ z[2].real**2 + z[2].imag**2) @jit_device def cross(left, right, result): result[0] = conj(left[1]*right[2] -",
"Units of :math:`\\\\hbar`. This is an output, so use an empty :class:`numpy.ndarray` with",
"\"\"\" Integrates the time dependent Schroedinger equation and returns the quantum state of",
"The evaluated time evolution operator between each time step. See :ref:`architecture` for some",
"is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if the system",
"there just in time if the `spin` property is needed. Compiled for chosen",
"+ 1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample,",
"integrator and spin calculation functions of the simulator. Parameters ---------- get_field : :obj:`callable`",
"else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2)",
"into another. .. math:: (A)_{i, j} = (B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray`",
"- An array to write the resultant adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable`",
"of squares to make to the approximate matrix (:math:`\\\\tau` above). \"\"\" def __init__(self,",
"times that `state` was evaluated at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index,",
"initial quantum state of the spin system, written in terms of the eigenstates",
"+ w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if dimension",
"the :func:`numba.jit()` LLVM compiler to compile the integrator to run on a single",
"value == \"cpu_single\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host",
"field_sample[0, :]) rotating_wave = field_sample[0, 2] if dimension == 2: rotating_wave /= 2",
"half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2)",
"Nvidia's official explanation. \"\"\" if not device: if cuda.is_available(): device = Device.CUDA else:",
"= field_sample[3]/(6*precision) # eq = math.cos(eq) + 1j*math.sin(eq) result[0, 0] = Ca/ez -",
"field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2,",
"The internal function for evaluating the time evolution operator in parallel. Compiled for",
"CPU core. .. note :: To use this device option, the user defined",
"2: temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :,",
"\"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an integer",
"jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func):",
"field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) # cx = math.cos(x) # sx = math.sin(x)",
"= X.imag field_sample[2] = field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64,",
"conj(operator[0, 2]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) result[2,",
":obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based on its analytic form. .. warning::",
"matrix the multiplicative identity, ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i,",
"result[2, 0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2,",
": :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution operator (matrix) between the",
"* **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field. This",
"result): result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0,",
"fourth order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration method.",
"math.cos(y) # sy = math.sin(y) # cisz = math.cos(z) + 1j*math.sin(z) # result[0,",
"\\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*}",
"jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value ==",
"+ left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2] result[1, 2] = left[1, 0]*right[0,",
"`spin` the first time it is referenced by the user. Parameters: * **state**",
"the chosen target device on construction of the object. Attributes ---------- conj(z) :",
"the exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if",
"spin_calculator def __getattr__(self, attr_name): if attr_name == \"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name,",
"is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there",
"The sample resolution of the output timeseries for the state. Must be a",
"nb.njit(template, parallel = True)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func)",
"operator[1, 0] result[2, 0] = operator[2, 0] result[0, 1] = operator[0, 1] result[1,",
"X.imag field_sample[2] = field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\")",
"- iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y)",
"& -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0",
"temporary) matrix_square_residual(temporary, result) result[0, 0] += 1 result[1, 1] += 1 result[2, 2]",
"0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as, for",
"= (cx*sy -1j*sx*cy)/cisz # result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz # result[1, 1]",
"calculation for each coarse timestep in parallel time_index = roc.get_global_id(1) if time_index <",
"1] += 1 result[2, 2] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result,",
"the quantum state of the spin system over time. Parameters ---------- sweep_parameter :",
"= cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)",
"state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index, 2]",
"0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0] result[1, 0] = operator[1,",
"return jit_device_template self.jit_device_template = jit_device_template elif value == \"cuda\": def jit_host(template, max_registers): def",
"x_index)) - The operator to take the adjoint of. * **result** (:class:`numpy.ndarray` of",
"values of x, y and z respectively, as described above. * **result** (:class:`numpy.ndarray`",
"= operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2,",
"- a parameter that can be swept over when multiple simulations need to",
"evolution opperator. Parameters ---------- sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index)",
"= operator[2, 0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1]",
"time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index)",
"- rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding):",
"state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state of the spin",
"self.complex_abs = complex_abs self.norm2 = norm2 self.inner = inner self.set_to = set_to self.set_to_one",
"1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if dimension > 2:",
"The values of x, y and z respectively, as described above. * **result**",
"`Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" if not device: if cuda.is_available(): device",
"rotating_wave /= 2 # For every fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration +",
"of the number of threads (workitems) they each contain, when running on the",
": :obj:`int` The maximum number of registers allocated per thread when using :obj:`Device.CUDA`",
"math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can be in",
"time samples that the time evolution operator is found for. In units of",
"return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu\": def jit_host(template, max_registers): def",
"+ \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state",
"float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0])",
"- cy + 1j*sx*sy) # result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2 # result[2,",
"(:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system",
"integrator can be used for many experiments, without the need for slow recompilation.",
"= time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample,",
"`spin_calculator`. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector) over time",
"np.float64) get_spin(state, spin) elif device == Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid",
"time (1)) The time values for when the experiment is to start and",
"**time_step_integration** (:obj:`float`) - The integration time step. Measured in s. * **time_step_output** (:obj:`float`)",
"+ q/3) - 1j*math.sin(z + q/3) # result[0, 0] = 0.5*cisz*(cx + cy",
"that can be used for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free,",
"state self.spin_calculator = spin_calculator def __getattr__(self, attr_name): if attr_name == \"spin\": spin =",
"+ (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1] result[2, 1] =",
"0] + (2 + operator[1, 1])*operator[1, 0] result[0, 1] = (2 + operator[0,",
"__init__(self, value, dimension, label): super().__init__() self._value_ = value self.dimension = dimension self.label =",
"(time_index, magnetic_quantum_number) The quantum state of the spin system over time, written in",
"0] = (y - 1j*x)*s result[0, 1] = -(y + 1j*x)*s result[1, 1]",
"product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to right multiply",
"expected spin projection (Bloch vector) over time. \"\"\" self.time = time self.time_evolution =",
"in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) self.conj = conj",
": :obj:`float` The time that the experiment is to finish at. Measured in",
"jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device = True, inline = True)(func) return jit_device_template",
"left[1, 1]*right[1, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1]",
"time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could not jit get_field function",
"inner self.set_to = set_to self.set_to_one = set_to_one self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply",
"\\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin",
"2]*operator[2, 2] result[1, 2] = operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1,",
"s. time_step_output : :obj:`float` The sample resolution of the output timeseries for the",
"device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28,",
"1 & 0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can",
"&= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &=",
"a system. Parameters ---------- value : :obj:`float` The numerical value of the spin",
"# result[1, 1] = cisz*cx*cy # result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2 #",
"= (1/2, 2, \"half\") \"\"\" For two level systems. \"\"\" ONE = (1,",
"chosen target device on construction of the object. Attributes ---------- conj(z) : :obj:`callable`",
"+ z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2",
"b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex number to",
"it is then recursively squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample**",
"\"\"\" Use the stepwise time evolution operators in succession to find the quantum",
"hyper_cube_amount = 0 precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a",
"x = field_sample[0]/precision # y = field_sample[1]/precision # z = field_sample[2]/precision # q",
"method to use for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER`",
"J_q), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 &",
"integrator to run on an Nvidia cuda compatible GPU, in parallel. .. note",
"The equivalent of the trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation to",
"complex_abs(z) : :obj:`callable` The absolute value of a complex number. .. math:: \\\\begin{align*}",
"result[1, 2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2,",
"spatial directions (to model a magnetic field, for example), and the fourth entry",
"(:obj:`float`) - The sample resolution of the output timeseries for the state. Must",
"need to be run. For example, it is used to sweep over dressing",
"to take the 2 norm of. Returns * **nz** (:class:`numpy.float64`) - The 2",
"x, y and z (and q for spin one) respectively, as described above.",
"sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def",
"field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave,",
"Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and",
"set to :math:`1`. set_to_zero(operator) : :obj:`callable` Make a matrix the additive identity, ie,",
"0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0] result[2, 0] =",
"2])*operator[2, 2] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0]",
"= math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] =",
"1] + operator[0, 1]*operator[1, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2",
"= math.cos(y) # sy = math.sin(y) # cisz = math.cos(z) + 1j*math.sin(z) #",
"temporary = np.empty((2, 2), dtype = np.complex128) # elif device_index == 1: #",
"# matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) result[0, 0] += 1 result[1,",
"= cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1)) //",
"self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers)",
"1]*operator[1, 0] + operator[0, 2]*operator[2, 0] result[1, 0] = operator[1, 0]*operator[0, 0] +",
"0] + left[0, 2]*right[2, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1,",
"over time for a given time series of a quantum state. This :obj:`callable`",
"1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64,",
"complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave,",
"self.time = time self.time_evolution = time_evolution self.state = state self.spin_calculator = spin_calculator def",
"result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] = sa*eq*ep/ez result[2, 0] =",
"/= r z /= r c = math.cos(r/2) s = math.sin(r/2) result[0, 0]",
"currently functional! \"\"\" class Results: \"\"\" The results of a an evaluation of",
"= 0 operator[1, 1] = 1 operator[2, 1] = 0 operator[0, 2] =",
"operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:],",
"0 if time_index > 0: for z_index in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index",
"the Lie Product Formula, .. math:: \\\\exp(A + B) = \\\\lim_{c \\\\to \\\\infty}",
"2]*operator[2, 2] result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] +",
"start at, and the time that the experiment is to finish at. Measured",
"= 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0",
"is commonly associated with the use of a rotating wave approximation, a technique",
"3), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount):",
"2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse)",
"not currently functional! \"\"\" class Results: \"\"\" The results of a an evaluation",
"iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z +",
"and links. spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator will",
"1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1]",
"3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration =",
"operator[2, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 0 operator[2,",
"__getattr__(self, attr_name): if attr_name == \"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name, spin) return",
"time_fine + time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse",
"+ left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator, result): result[0,",
"in terms of the number of threads (workitems) they each contain, when running",
"threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif device == Device.ROC: spin = roc.device_array((state.shape[0], 3),",
"= set_to self.set_to_one = set_to_one self.set_to_zero = set_to_zero self.matrix_multiply = matrix_multiply self.adjoint =",
"to be returned in result. .. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k}",
"s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z +",
":func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable python features. \"\"\" ROC",
"left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator, result): result[0, 0]",
"0: for time_index in nb.prange(spin.shape[0]): if dimension == 2: spin[time_index, 0] = (state[time_index,",
"= math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0: # hyper_cube_amount = 0 # precision",
"* **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state` was evaluated",
"2), dtype = np.complex128) # elif device_index == 1: # temporary = cuda.local.array((2,",
"1j*field_sample[1])/a else: ep = 1 a = a/precision Ca = math.cos(a/2) Sa =",
"result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1]",
": :obj:`bool` Whether or not to use the rotating frame optimisation. Defaults to",
"+ 1j*field_sample[1])/a else: ep = 1 a = a/precision Ca = math.cos(a/2) Sa",
"spin) return self.spin raise AttributeError(\"{} has no attribute called {}.\".format(self, attr_name)) class Simulator:",
"\"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable` A python function",
"J_x - iy J_y - iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x)",
"the user defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python",
"+ left[0, 2]*right[2, 2] result[1, 2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1,",
"= time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index] # Initialise time evolution operator to",
"self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results = Results(time, time_evolution_coarse, state, self.spin_calculator) return results",
"1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2: field_sample[2, 3] =",
"1 & 0 & 0 \\\\\\\\ 0 & -2 & 0 \\\\\\\\ 0",
"A matrix to be filled with the result of the product. adjoint(operator) :",
"spin) spin = spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator def",
": :obj:`float` The time difference between each element of `time_coarse`. In units of",
"**time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that the experiment is",
"+ left[1, 2]*right[2, 2] result[2, 2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1,",
"spin_calculator def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates the time",
"(2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0] result[2, 0] = operator[2,",
"multiply by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to",
"of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x + y J_y",
"& 0 & 1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as,",
"if time_index < spin.shape[0]: if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index,",
"evaluated quantum state of the spin system over time, written in terms of",
"the `spin` property is needed. Compiled for chosen device on object constrution. Parameters:",
"& 1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as, for large",
"1] + left[0, 1]*right[1, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1,",
"0] + w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2,",
":class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin wavefunction) of the system at the start",
"sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index == 0: temporary =",
"if device_index == 0: temporary = np.empty((2, 2), dtype = np.complex128) elif device_index",
"= math.sin(y) # cisz = math.cos(z + q/3) - 1j*math.sin(z + q/3) #",
"+= time_step_integration if use_rotating_frame: # Take out of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output)",
"the system at the start of the simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128`",
"increase execution time for different GPU models. device : :obj:`Device` The option to",
"super().__init__() self._value_ = value self.index = index ANALYTIC = (\"analytic\", 0) \"\"\" Analytic",
"run on an AMD ROCm compatible GPU, in parallel. .. warning :: Work",
"- The matrix to right multiply by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0] result[0, 1]",
"== 1: temporary = cuda.local.array((2, 2), dtype = np.complex128) elif device_index == 2:",
"bias field strength in `get_field`, then one can run many simulations, sweeping through",
"matrix to left multiply by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[0, 1] =",
"0.5*cisz*(cx + cy + 1j*sx*sy) # if device_index == 0: # temporary =",
"and `Supported Numpy features`_ for compilable numpy features. \"\"\" CUDA = (\"cuda\", 1)",
"& e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}}",
"rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 = (1.5",
"ANALYTIC = (\"analytic\", 0) \"\"\" Analytic expression of the matrix exponential. For spin",
"jit_device(func): return roc.jit(device = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return",
"operator[0, 1] result[1, 1] = operator[1, 1] @jit_device def set_to_one(operator): operator[0, 0] =",
"{time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init, np.complex128)",
"is to be written to. * **trotter_cutoff** (:obj:`int`) - The number of squares",
"transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0,",
"Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid =",
"= math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /=",
"(2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] result[1, 0] = operator[1,",
"time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])\",",
"= np.empty((3, 3), dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((3,",
"Measured in s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state",
"J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\",
"time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device == Device.ROC: time = roc.device_array(time_index_max,",
"def jit_host(func): return func return jit_host self.jit_host = jit_host def jit_device(func): return func",
"if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an integer multiple",
"can, of course, use :mod:`spinsim` to integrate states in the rotating frame, using",
"device_index == 1: # temporary = cuda.local.array((3, 3), dtype = np.complex128) # elif",
"adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device",
"not jit get_field function into a cuda device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host()",
"field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2]",
"time for different GPU models. max_registers : :obj:`int` The maximum number of registers",
"is needed. Compiled for chosen device on object constrution. Parameters: * **state** (:obj:`numpy.ndarray`",
"trotter_cutoff:int = 28, threads_per_block = 64, max_registers = 63): \"\"\" Compiles the integrator",
"\\\\exp(A + B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:**",
"blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin",
"return def spin_calculator(state): \"\"\" Calculates the expected spin projection (Bloch vector) over time",
"Occupancy`_ for Nvidia's official explanation. \"\"\" if not device: if cuda.is_available(): device =",
"field in the z direction. This removes the (possibly large) z component of",
"with this spin belong to. label : :obj:`str` A text label that can",
"(state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host()",
"< 0: hyper_cube_amount = 0 precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1])",
"1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] @jit_device def",
"sweep_parameter) elif device_index == 2: # Run calculation for each coarse timestep in",
"**z** (:class:`numpy.complex128`) - The complex number to take the absolute value of. Returns",
"state. Used to calculate `spin` the first time it is referenced by the",
"= conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) result[2, 1] = conj(operator[1, 2])",
"over time for a given time series of a quantum state. Parameters ----------",
":class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(spin.shape[0]): if",
"time evolution operators in succession to find the quantum state timeseries of the",
"def __init__(self, time, time_evolution, state, spin_calculator): \"\"\" Parameters ---------- time : :obj:`numpy.ndarray` of",
"spatial_index) The expected value for hyperfine spin of the spin system in the",
"integration method. \"\"\" HALF_STEP = \"half_step\" \"\"\" Integration method from AtomicPy. Makes two",
"== 2: # temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) # temporary",
"@jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index == 0:",
"result[0, 2] = conj(operator[2, 0]) result[1, 2] = conj(operator[2, 1]) result[2, 2] =",
"3 level atom. Parameters ---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin",
"+ (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif",
"integrator. These device functions are compiled for the chosen target device on construction",
"+ z**2) if r > 0: x /= r y /= r z",
"target device that the integrator is being compiled for. .. _Supported Python features:",
"**operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`0`.",
"option, the user defined field function must be :func:`numba.jit()` compilable. See `Supported Python",
"time. \"\"\" if device.index == 0: spin = np.empty((state.shape[0], 3), np.float64) get_spin(state, spin)",
"0 & 1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as, for",
"Copy the contents of one matrix into another. .. math:: (A)_{i, j} =",
"previous state for x_index in nb.prange(state.shape[1]): state[time_index, x_index] = 0 if time_index >",
"j \\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"result[1, 0] = 0 result[0, 1] = 0 result[1, 1] = 1 @jit_device",
"which the result of the exponentiation is to be written to. * **trotter_cutoff**",
"1: time_index = cuda.grid(1) elif device_index == 1: time_index = roc.get_global_id(1) if time_index",
"field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine,",
"0 # precision = 4**hyper_cube_amount # x = field_sample[0]/precision # y = field_sample[1]/precision",
":class:`numpy.float64` (start time (0) or end time (1)) The time values for when",
"_Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self,",
"jit_device_template elif value == \"cpu_single\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func) return",
"finish at. Measured in s. The duration of the experiment is `time_end -",
"= operator[1, 0] result[2, 0] = operator[2, 0] result[0, 1] = operator[0, 1]",
"@jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0,",
"in time if the `spin` property is needed. Compiled for chosen device on",
"device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) #",
"& -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be calculated as .. math::",
"by an amount defined by the field in the z direction. This removes",
"expected spin projection (Bloch vector) over time. \"\"\" if device.index == 0: spin",
"== 2: # Run calculation for each coarse timestep in parallel time_index =",
"multiple of `time_step_integration`. Measured in s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The",
"= cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy)",
"the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying might be able",
"left[1, 2]*right[2, 0] result[2, 0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1, 0]",
"be an optimal value of `max_registers` for each model of GPU running :mod:`spinsim`,",
"math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a > 0: ep = (field_sample[0] + 1j*field_sample[1])/a else:",
"using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time",
"= device.jit_host jit_device = device.jit_device jit_device_template = device.jit_device_template device_index = device.index dimension =",
"The expected spin projection (Bloch vector) over time. \"\"\" if device.index == 0:",
"cannot be interpreted. \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index",
"eigenstates of the spin projection operator in the z direction. Returns: * **spin**",
"operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0]",
"result): result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2] =",
"0: spin = np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif device == Device.CUDA: spin",
"roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] #",
"GTX1070, the device used for testing. Note that one extra register per thread",
"= roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :]",
"math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0 \\\\\\\\ 1 &",
"= math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a > 0: ep = (field_sample[0] + 1j*field_sample[1])/a",
"Device.CUDA else: device = Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number self.device =",
"AMD ROCm compatible GPU, in parallel. .. warning :: Work in progress, not",
"left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0,",
"threads run concurrently, at the expense of fewer resgiters being avaliable to each",
"= (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] result[1, 1] =",
"sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for the spin quantum number of",
"= temporary_group[roc.get_local_id(1), :, :] # for power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary)",
":obj:`IntegrationMethod` Which integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See",
"& -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as, for large",
"64, max_registers = 63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field :",
"= (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 -",
"`Supported Numpy features`_ for compilable numpy features. \"\"\" CUDA = (\"cuda\", 1) \"\"\"",
"\\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1",
"+= time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64,",
"not to use the rotating frame optimisation. Defaults to :obj:`True`. If set to",
"matrix_multiply(temporary, temporary, result) else: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2",
"def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates the time dependent",
"return roc.jit(template, device = True)(func) return jit_device_template self.jit_device_template = jit_device_template PYTHON = (\"python\",",
":class:`numpy.complex128`, (index)) - The vector to left multiply in the inner product. *",
"**field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of x, y and",
"# matrix_multiply(temporary, temporary, result) result[0, 0] += 1 result[1, 1] += 1 #",
"For spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation",
"time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0]",
"time evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0 if use_rotating_frame:",
"(\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to",
"times that `state` was evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) -",
"s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X",
"(2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] result[1, 1] = operator[1,",
"the start of the time step, one sampling the field from the end",
"def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1]",
"# Premultiply to the exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse)",
"direction. Returns ------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin",
"in succession to find the quantum state timeseries of the 3 level atom.",
"used within the integration algorithm. In units of s. time_step_output : :obj:`float` The",
"1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an integer multiple of time_step_integration. Resetting time_step_integration to",
"the trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation to use for matrix",
"must be compilable for the device that the integrator is being compiled for.",
"&= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx",
"used for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free, fourth order Magnus",
"is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise.",
": :obj:`IntegrationMethod` Which integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`.",
"y, z spatial directions (to model a magnetic field, for example), and the",
"0] = operator[0, 0] result[1, 0] = operator[1, 0] result[0, 1] = operator[0,",
"- The matrix to copy to. set_to_one(operator) : :obj:`callable` Make a matrix the",
"Then the exponential can be calculated as .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix",
"`spinsim` was designed for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value",
"2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real",
"to right multiply in the inner product. Returns * **d** (:class:`numpy.complex128`) - The",
"get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0, 2] if dimension == 2: rotating_wave",
"spin) spin = spin.copy_to_host() elif device == Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64)",
"That is, whether the integrator is compiled for a CPU or GPU. Defaults",
"field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if dimension > 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0,",
"get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1 -",
"cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy) # if device_index",
"= math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2,",
"Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to take the",
"1] = (cx*cy + 1j*sx*sy)*cisz # if device_index == 0: # temporary =",
"approximated as, for large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x -",
":math:`T` is calculated, it is then recursively squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`.",
"if device_index == 0: # temporary = np.empty((3, 3), dtype = np.complex128) #",
"adjoint(operator) : :obj:`callable` Takes the hermitian adjoint of a matrix. .. math:: \\\\begin{align*}",
"- The number of squares to make to the approximate matrix (:math:`\\\\tau` above).",
"has Cuda cores, meaning some cores are inactive, and the GPU is said",
"append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] =",
"a quantum state. This :obj:`callable` is passed to the :obj:`Results` object returned from",
"= time self.time_evolution = time_evolution self.state = state self.spin_calculator = spin_calculator def __getattr__(self,",
"a matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix",
":class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index ==",
"field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0] + w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1]",
"\\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0",
"\"\"\" class Results: \"\"\" The results of a an evaluation of the integrator.",
"of a maximum number for the whole GPU, for each specific GPU model.",
"for the device that the integrator is being compiled for. See :class:`Device` for",
"result[0, 2] = (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] +",
"- The matrix to set to :math:`1`. set_to_zero(operator) : :obj:`callable` Make a matrix",
"# precision = 4**hyper_cube_amount # x = field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) #",
"\"half\") \"\"\" For two level systems. \"\"\" ONE = (1, 3, \"one\") \"\"\"",
"be able to increase execution time for different GPU models. device : :obj:`Device`",
"@jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount =",
"state_init): \"\"\" Integrates the time dependent Schroedinger equation and returns the quantum state",
"\"cpu_single\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host = jit_host",
"return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu\": def jit_host(template,",
"to each thread, out of a maximum number for the whole GPU, for",
"timeseries for the state. Must be a whole number multiple of `time_step_integration`. Measured",
"a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or",
"approximate analytic solutions of spin system dynamics. This is not done when this",
"device = None, exponentiation_method = None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff",
"= append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64,",
"features`_ for compilable numpy features. \"\"\" CUDA = (\"cuda\", 1) \"\"\" Use the",
":func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run on an Nvidia cuda",
"memory must be used. Thus, there will be an optimal value of `max_registers`",
"features`_ for compilable numpy features. \"\"\" CPU = (\"cpu\", 0) \"\"\" Use the",
"is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a",
"(2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1]",
"& 0 & 0 \\\\\\\\ 0 & 0 & -1 \\\\end{pmatrix},& J_q &=",
"sample_index_max = 1 sample_index_end = 1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC)",
"system in the lab frame, for each time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`.",
"is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with ..",
"- 1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy # result[2, 1] = cisz*(sy -",
".. math:: \\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of",
"calling this method multiple times, each time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of",
"result[1, 1] = operator[1, 1] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1,",
"The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the",
"the integrator is being compiled for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported",
"explanation. \"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj complex_abs = utilities.complex_abs",
"= math.cos(eq) + 1j*math.sin(eq) # Ca = 1 # Sa = a/2 #",
"the inner product. Returns * **d** (:class:`numpy.complex128`) - The inner product of l",
"arguments: * **time_sample** (:obj:`float`) - the time to sample the field at, in",
"63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable` A python",
"0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0] result[1, 0] = left[1,",
": :obj:`int` Dimension of the hilbert space the states with this spin belong",
"1] = -(cx*sy + 1j*sx*cy)*cisz # result[1, 1] = (cx*cy + 1j*sx*sy)*cisz #",
"b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex",
"is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\",
"chosen device on object constrution. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number))",
"each time step. See :ref:`architecture` for some information. spin_calculator : :obj:`callable` Calculates the",
"for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" utilities =",
"the stepwise time evolution operators in succession to find the quantum state timeseries",
"= 0.5*cisz*(cx + cy - 1j*sx*sy) # result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2",
"increase the execution speed for a specific GPU model. Defaults to 63 (optimal",
"threads, and changing this value could increase performance for your GPU. See `Achieved",
"Parameters ---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of",
"0 operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 0",
"sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for the spin",
"append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method ==",
"in result. .. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: *",
"operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1]",
"self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could not jit",
"Cuda cores, meaning some cores are inactive, and the GPU is said to",
"(start time (0) or end time (1)) The time values for when the",
".. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA",
"exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use",
"Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) - The",
"evolution operators in succession to find the quantum state timeseries of the 3",
"time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 2: # Run calculation for each coarse",
"+ left[0, 2]*right[2, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1,",
"0 operator[1, 0] = 0 operator[2, 0] = 0 operator[0, 1] = 0",
"\\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta).",
"# q = field_sample[3]/precision # cx = math.cos(x) # sx = math.sin(x) #",
"equation and returns the quantum state of the spin system over time. Parameters",
"to increase the execution speed for a specific GPU model. Defaults to 63",
"device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device == Device.ROC:",
"product of l and r. set_to(operator, result) : :obj:`callable` Copy the contents of",
"this option is set to :obj:`True` - no such approximations are made, and",
"set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic",
"jit_device_template(func): return cuda.jit(template, device = True, inline = True)(func) return jit_device_template self.jit_device_template =",
"have less occupancy. Lowering the value increases GPU occupancy, meaning more threads run",
"= left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1] result[0,",
"Find the stepwise time evolution opperator. Parameters ---------- sweep_parameter : :obj:`float` time_coarse :",
"math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0,",
"written in terms of the eigenstates of the spin projection operator in the",
"of the spin system in the lab frame, for each time sampled. See",
"might be able to increase execution time for different GPU models. device :",
"= np.complex128) # elif device_index == 1: # temporary = cuda.local.array((2, 2), dtype",
"to finish at. Measured in s. The duration of the experiment is `time_end",
"jit_host self.jit_host = jit_host def jit_device(func): return func self.jit_device = jit_device def jit_device_template(template):",
"result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount = 0 precision",
"cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block",
"might be able to increase execution time for different GPU models. \"\"\" jit_device",
"0 \\\\\\\\ 0 & -2 & 0 \\\\\\\\ 0 & 0 & 1",
"= j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"\\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}`",
"(2 + operator[2, 2])*operator[2, 1] result[0, 2] = (2 + operator[0, 0])*operator[0, 2]",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator to take the adjoint of.",
"= np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the exponential if exponentiation_method_index",
"+ (2 + operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator, result): result[0, 0] =",
"w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if dimension >",
"# if hyper_cube_amount < 0: # hyper_cube_amount = 0 # precision = 4**hyper_cube_amount",
"2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *=",
"c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z +",
"to right multiply by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A",
"matrix exponential based on the Lie Product Formula, .. math:: \\\\exp(A + B)",
"math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq) # Ca",
"time self.time_evolution = time_evolution self.state = state self.spin_calculator = spin_calculator def __getattr__(self, attr_name):",
":obj:`float` The numerical value of the spin quantum number. dimension : :obj:`int` Dimension",
"evolution operator (matrix) between the current and next timesteps, for each time sampled.",
"sweep_parameter, field_sample[0, :]) time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1]",
"time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device == Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse",
"c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated, it is",
"ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end = 4 elif integration_method",
"= 0 operator[2, 0] = 0 operator[0, 1] = 0 operator[1, 1] =",
"== \"cpu_single\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host =",
"\"python\": def jit_host(template, max_registers): def jit_host(func): return func return jit_host self.jit_host = jit_host",
"if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end = 4 elif integration_method ==",
"Euler integration steps, one sampling the field from the start of the time",
"+ w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2, 2]",
"for Nvidia's official explanation. \"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj",
"time_index < spin.shape[0]: if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real",
"1] + operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1] result[0, 2]",
"when compiling the integrator, where higher level objects like enums cannot be interpreted.",
"+ (2 + operator[2, 2])*operator[2, 0] result[0, 1] = (2 + operator[0, 0])*operator[0,",
"IntegrationMethod.MAGNUS_CF4, exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers = 63):",
"sy = math.sin(y) # cisz = math.cos(z + q/3) - 1j*math.sin(z + q/3)",
"= utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic =",
"of the spin projection operator in the z direction. spin_calculator : :obj:`callable` Calculates",
"occupancy, meaning more threads run concurrently, at the expense of fewer resgiters being",
"user defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_",
"for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index",
"if device.index == 0: spin = np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif device",
"== Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid",
"<filename>spinsim/__init__.py \"\"\" \"\"\" # from . import utilities from enum import Enum import",
"time evolution operator in parallel. Compiled for chosen device on object constrution. Parameters:",
"1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype",
"value of a complex number. .. math:: \\\\begin{align*} |a + ib| &= \\\\sqrt{a^2",
"rotating_wave, rotating_wave_winding): time_sample = time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample",
"matrices left and right together, to be returned in result. .. math:: \\\\begin{align*}",
"- The complex number to take the absolute value of. Returns * **az**",
"device : :obj:`Device` The option to select which device will be targeted for",
"the spin system in the lab frame, for each time sampled. time_evolution :",
"\\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex number to take the conjugate",
"return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter, time_start, time_end,",
"+ z[1].imag**2) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator,",
"the results of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim",
"== Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid",
"transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] =",
"0 # precision = 4**hyper_cube_amount # x = field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision)",
"object constrution. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum",
"This is a four dimensional vector, with the first three entries being x,",
"IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max =",
"number to take the absolute value of. Returns * **az** (:class:`numpy.float64`) - The",
"the integrator is compiled for a CPU or GPU. Defaults to :obj:`Device.CUDA` if",
"Measured in s. * **time_step_integration** (:obj:`float`) - The integration time step. Measured in",
"# if device_index == 0: # temporary = np.empty((3, 3), dtype = np.complex128)",
"device_index == 1: # temporary = cuda.local.array((2, 2), dtype = np.complex128) # elif",
"result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx - cy",
"to copy to. set_to_one(operator) : :obj:`callable` Make a matrix the multiplicative identity, ie,",
"1] result[0, 2] = operator[0, 2] result[1, 2] = operator[1, 2] result[2, 2]",
"variables if device_index == 0: time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128) field_sample",
"right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0, 0] = operator[0,",
".. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1 & 0",
"field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) # cx = math.cos(x)",
"sqrt3)/6 w1 = (1.5 - sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1,",
"- sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2, 1] =",
"the resultant adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential",
"the integration algorithm. In units of s. time_step_output : :obj:`float` The time difference",
"Declare variables if device_index == 0: time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128)",
"**result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array to write the resultant",
"= np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128) elif device_index == 1: time_evolution_fine",
"complex number to take the conjugate of. Returns * **cz** (:class:`numpy.complex128`) - The",
"jit_device(func): return cuda.jit(device = True, inline = True)(func) self.jit_device = jit_device def jit_device_template(template):",
"0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if dimension > 2:",
"spin value in parallel. For spin half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) =",
"2] result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2",
":class:`numpy.complex128` The state (spin wavefunction) of the system at the start of the",
"dimensional vector, with the first three entries being x, y, z spatial directions",
"J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0 \\\\\\\\ i & 0 &",
"GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" utilities = Utilities(spin_quantum_number, device,",
"1: # temporary = cuda.local.array((3, 3), dtype = np.complex128) # elif device_index ==",
"`Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block) conj",
"approximation in the rotating frame. integration_method : :obj:`IntegrationMethod` Which integration method to use",
"official explanation. \"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj complex_abs =",
"state[time_index, 2].real**2 - state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\" Calculates the expected spin",
"value self.index = index ANALYTIC = (\"analytic\", 0) \"\"\" Analytic expression of the",
"(2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] -",
"__init__(self, get_field, spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame = True, integration_method",
"(y_index, x_index)) - The matrix to set to :math:`0`. matrix_multiply(left, right, result) :",
"2] = operator[1, 2] result[2, 2] = operator[2, 2] @jit_device def set_to_one(operator): operator[0,",
"== SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end",
"Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator to take",
"\"\"\" class IntegrationMethod(Enum): \"\"\" Options for describing which method is used during the",
"Returns * **nz** (:class:`numpy.float64`) - The 2 norm of z. inner(left, right) :",
"\\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2})",
"= \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*}",
"(y_index, x_index)) - The matrix which the result of the exponentiation is to",
"Enum import numpy as np import numba as nb from numba import cuda",
"Will not work with spin one systems. Assumes the exponent is an imaginary",
"dimension self.label = label HALF = (1/2, 2, \"half\") \"\"\" For two level",
"1j*sx*sy) # cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy -",
"(time_index)) - The times that `state` was evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of",
"result[1, 1] = operator[1, 1] result[2, 1] = operator[2, 1] result[0, 2] =",
":obj:`ExponentiationMethod` Which method to use for matrix exponentiation in the integration algorithm. Defaults",
"iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z - iq",
"cisz*cx*cy # result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2 # cisz = math.cos(z -",
"required, in spin one systems). .. note:: This function must be compilable for",
"In units of s. time_step_integration : :obj:`float` The time step used within the",
"fourth entry being the amplitude of the quadratic shift (only appearing, and required,",
"integration time step. Measured in s. time_step_output : :obj:`float` The sample resolution of",
":class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated time evolution operator between each time step.",
"result[1, 2] = conj(operator[2, 1]) result[2, 2] = conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample,",
"1]*operator[1, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1,",
"spin projection operator in the z direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index,",
"0. .. math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l",
"if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2",
"the spin system over time. Parameters ---------- sweep_parameter : :obj:`float` The input to",
"In units of s. time_step_output : :obj:`float` The time difference between each element",
"the field at, in units of s. * **simulation_index** (:obj:`int`) - a parameter",
"of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array to write",
"appearing, and required, in spin one systems). .. note:: This function must be",
"0.5*cisz*(cx - cy + 1j*sx*sy) # result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2 #",
"setattr(self, attr_name, spin) return self.spin raise AttributeError(\"{} has no attribute called {}.\".format(self, attr_name))",
"math.tau*time_step_integration*field_sample[0, 2] if dimension > 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :],",
"set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[2, 0] = 0",
"self._value_ = value self.index = index if value == \"python\": def jit_host(template, max_registers):",
"complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine -",
": :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state of the spin system",
"J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i & 0 \\\\end{pmatrix},& J_z &=",
"run fewer threads concurrently than it has Cuda cores, meaning some cores are",
"z[1].imag**2) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator, result):",
"get approximate analytic solutions of spin system dynamics. This is not done when",
"(possibly large) z component of the field, which increases the accuracy of the",
"For two level systems. \"\"\" ONE = (1, 3, \"one\") \"\"\" For three",
"1] = cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy # result[2, 1]",
"float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine",
"= (\"cpu\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the integrator",
"else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index, 1] =",
"operator[1, 2]*operator[2, 1] result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1]",
"(R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix",
"+ y^2 + z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) -",
": :class:`numpy.ndarray` of :class:`numpy.float64` (start time (0) or end time (1)) The time",
"equivalent of the trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation to use",
"target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying might be able to increase",
"and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for",
"it is being run on is Nvidia Cuda compatible, and defaults to :obj:`Device.CPU`",
"> 0: ep = (field_sample[0] + 1j*field_sample[1])/a else: ep = 1 a =",
"self.dimension = dimension self.label = label HALF = (1/2, 2, \"half\") \"\"\" For",
"for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field.",
"left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2] result[2, 2] = left[2, 0]*right[0, 2]",
"spin one systems** Assumes the exponent is an imaginary linear combination of a",
"= -1j*a/sqrt2 # ez = field_sample[2]/(2*precision) # ez = 1 + 1j*ez #",
"CUDA = (\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler to compile the",
"= field_sample[0, 2] if dimension == 2: rotating_wave /= 2 # For every",
"1] = 0 result[1, 1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount",
"each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so use",
":obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch vector) over time.",
"(A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases}",
"values of x, y and z (and q for spin one) respectively, as",
"operator[2, 2])*operator[2, 1] result[0, 2] = (2 + operator[0, 0])*operator[0, 2] + operator[0,",
"1, x_index, z_index]*state[time_index - 1, z_index] else: state[time_index, x_index] += state_init[x_index] sqrt2 =",
"0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter,",
"matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) result[0, 0] += 1 result[1, 1]",
"&= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For",
"has no attribute called {}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes ---------- spin_quantum_number :",
"operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0] result[1, 0] = operator[1, 0]*operator[0, 0]",
"of the time step. The equivalent of the trapezoidal method. \"\"\" class ExponentiationMethod(Enum):",
"an evaluation of the integrator. Attributes ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)",
"spin projection (Bloch vector) over time. \"\"\" self.time = time self.time_evolution = time_evolution",
"0 if use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave",
"0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 &",
"+= time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index] else: state[time_index, x_index] +=",
"state, time_evolution_coarse) results = Results(time, time_evolution_coarse, state, self.spin_calculator) return results @staticmethod @nb.njit def",
"the integrator to run on an Nvidia cuda compatible GPU, in parallel. ..",
"projection operator in the z direction. spin_calculator : :obj:`callable` Calculates the expected spin",
"for all options and more details. threads_per_block : :obj:`int` The size of each",
"the integrator to run on all CPU cores, in parallel. .. note ::",
"time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 2: # Run calculation for",
"cuda.jit(device = True, inline = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func):",
"x_index)) - The matrix to left multiply by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"vector) over time. \"\"\" def __init__(self, time, time_evolution, state, spin_calculator): \"\"\" Parameters ----------",
"float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave,",
"dimension, dimension), dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block,",
"`sweep_parameter` is used to define the bias field strength in `get_field`, then one",
"\\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau}",
"Trotter theorem. \"\"\" class Device(Enum): \"\"\" The target device that the integrator is",
"left vector is conjugated. Thus the inner product of two orthogonal vectors is",
"to be written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a matrix exponential based",
"Parameters ---------- value : :obj:`float` The numerical value of the spin quantum number.",
"# if device_index == 0: # temporary = np.empty((2, 2), dtype = np.complex128)",
"See :class:`Device` for more information and links. spin_quantum_number : :obj:`SpinQuantumNumber` The option to",
"result[1, 2] = sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index ==",
"To use this device option, the user defined field function must be :func:`numba.jit()`",
"definition is used here rather than the physics definition, so the left vector",
"time_step_output, time_evolution_coarse) elif self.device == Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max,",
"time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64,",
"for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0] += 1 result[1,",
"0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0] result[0, 1] = left[0,",
"self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc",
"will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. device",
"\\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j",
"\\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2})",
"(Bloch vector) over time for a given time series of a quantum state.",
"the simulator. Parameters ---------- get_field : :obj:`callable` A python function that describes the",
"0] - state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2 -",
"many experiments, without the need for slow recompilation. For example, if the `sweep_parameter`",
"exponentiation within the integrator. Parameters ---------- value : :obj:`str` A text label that",
"operator is found for. In units of s. This is an output, so",
"np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block -",
"integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba could not jit get_field",
"z[1].real**2 + z[1].imag**2) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def",
".. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q",
"1]*right[1, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] result[1,",
"Results: \"\"\" The results of a an evaluation of the integrator. Attributes ----------",
"time_step_integration if use_rotating_frame: # Take out of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) +",
"The expected spin projection (Bloch vector) over time. This is calculated just in",
"1] = 0 operator[1, 1] = 0 @jit_device def matrix_multiply(left, right, result): result[0,",
"@nb.njit def get_state(state_init, state, time_evolution): \"\"\" Use the stepwise time evolution operators in",
"of :class:`numpy.complex128`, (y_index, x_index)) - The operator to take the adjoint of. *",
"information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum state of",
"def jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host = jit_host def",
"time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state,",
"definition, so the left vector is conjugated. Thus the inner product of two",
"np.complex128) elif device_index == 1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample",
"((A)_{x,y})^* \\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray`",
"free, fourth order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration",
"between two complex vectors. .. note:: The mathematics definition is used here rather",
"at. Measured in s. * **time_step_integration** (:obj:`float`) - The integration time step. Measured",
"1j*math.sin(ez) # eq = field_sample[3]/(6*precision) # eq = math.cos(eq) + 1j*math.sin(eq) result[0, 0]",
"rotating_wave, rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0] =",
"1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index ==",
"spin_quantum_number.dimension lie_dimension = dimension + 1 # utility_set = spin_quantum_number.utility_set if not exponentiation_method:",
"exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\") def append_exponentiation(field_sample,",
"spin half. Switching to a Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index =",
"= math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2]",
"all options and more details. threads_per_block : :obj:`int` The size of each thread",
"= np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype =",
"time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1,",
"algorithm. In units of s. time_step_output : :obj:`float` The time difference between each",
"current and next timesteps, for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This",
"cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif",
"- time_start`. time_step_integration : :obj:`float` The integration time step. Measured in s. time_step_output",
"integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter,",
"time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension ==",
"by calling this method multiple times, each time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray`",
"in units of s. * **simulation_index** (:obj:`int`) - a parameter that can be",
"= field_sample[0]/precision # y = field_sample[1]/precision # z = field_sample[2]/precision # q =",
"roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group =",
"@jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0]",
"right together, to be returned in result. .. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j",
"0: temporary = np.empty((3, 3), dtype = np.complex128) elif device_index == 1: temporary",
"a = a/precision Ca = math.cos(a/2) Sa = math.sin(a/2) ca = math.cos(a) sa",
"def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value",
"to calculate `spin` the first time it is referenced by the user. Parameters:",
"spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int` The size of each thread",
"(:obj:`int`) - a parameter that can be swept over when multiple simulations need",
"this spin belong to. label : :obj:`str` A text label that can be",
"functions (functions compiled for use on the target device) used in the integrator.",
"the time that the experiment is to finish at. Measured in s. *",
":obj:`float` The time offset that the experiment is to start at. Measured in",
"jit_device_template(func): return roc.jit(template, device = True)(func) return jit_device_template self.jit_device_template = jit_device_template PYTHON =",
"exponentiation is to be written to. * **trotter_cutoff** (:obj:`int`) - The number of",
"more information and links. spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the",
"+ cy - 1j*sx*sy) # result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2 # result[2,",
"approximation, a technique used to get approximate analytic solutions of spin system dynamics.",
"-((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) - 1 result[2, 1]",
"user. Modifies the field function so the integrator can be used for many",
"time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers = max_registers) def get_spin(state, spin):",
"sampling the field from the start of the time step, one sampling the",
"pure python interpreted code for the integrator, ie, don't compile the integrator. \"\"\"",
"2: # Run calculation for each coarse timestep in parallel time_index = roc.get_global_id(1)",
"X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else:",
"time = time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results =",
"result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2,",
"time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame: # Take",
"is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math::",
"features`_ for compilable python features, and `Supported Numpy features`_ for compilable numpy features.",
"True)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device = jit_device",
"so really this number is 64). Raising this value allocates more registers (fast",
"of a quantum state. This :obj:`callable` is passed to the :obj:`Results` object returned",
"2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step",
"2), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] # for power_index",
"math:: \\\\begin{align*} A &= -i(x J_x + y J_y + z J_z), \\\\end{align*}",
"time_index in range(state.shape[0]): # State = time evolution * previous state for x_index",
":obj:`numpy.float64` (time_index)) - The times that `state` was evaluated at. * **time_end_points** (:obj:`numpy.ndarray`",
":obj:`int` The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is",
"- The conjugate of z. complex_abs(z) : :obj:`callable` The absolute value of a",
"rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:,",
"2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] =",
"dimension, dimension), dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the",
"(field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] -",
"complex number to take the absolute value of. Returns * **az** (:class:`numpy.float64`) -",
"x_index] += state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class",
"- left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device",
"Measured in s. time_end : :obj:`float` The time that the experiment is to",
"def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device def inner(left,",
"spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC",
"device == Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block",
"of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the result",
"utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic",
"1] + (2 + operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator, result): result[0, 0]",
"0 operator[0, 2] = 0 operator[1, 2] = 0 operator[2, 2] = 1",
"return jit_device_template self.jit_device_template = jit_device_template PYTHON = (\"python\", 0) \"\"\" Use pure python",
"---------- value : :obj:`str` A text label that can be used for archiving.",
"used. Thus, there will be an optimal value of `max_registers` for each model",
"in the z direction. This removes the (possibly large) z component of the",
"2), dtype = np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block,",
"3), dtype = np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block,",
"warning :: Work in progress, not currently functional! \"\"\" class Results: \"\"\" The",
"text label that can be used for archiving. index : :obj:`int` A reference",
"# x = field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) #",
"of :class:`numpy.complex128`, (index)) - The vector to right multiply in the inner product.",
"official explanation. \"\"\" if not device: if cuda.is_available(): device = Device.CUDA else: device",
"right multiply in the inner product. Returns * **d** (:class:`numpy.complex128`) - The inner",
"2: # temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) # temporary =",
"the result of the exponentiation is to be written to. matrix_exponential_lie_trotter(field_sample, result) :",
"\\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy",
"`spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set",
"of the spin system in the lab frame, for each time sampled. Units",
"to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details. use_rotating_frame : :obj:`bool` Whether or not",
"of the 3 level atom. Parameters ---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The",
"left[0, 2]*right[2, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1]",
"time_step_output : :obj:`float` The time difference between each element of `time_coarse`. In units",
"0] = 0 operator[2, 0] = 0 operator[0, 1] = 0 operator[1, 1]",
": :obj:`callable` A python function that describes the field that the spin system",
"math.cos(y) # sy = math.sin(y) # cisz = math.cos(z + q/3) - 1j*math.sin(z",
"\"\"\" for time_index in range(state.shape[0]): # State = time evolution * previous state",
"in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`,",
"spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter, time_start,",
"- iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\",
"math.sin(r/2) result[0, 0] = c - 1j*z*s result[1, 0] = (y - 1j*x)*s",
"left multiply in the inner product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) -",
"+ operator[2, 2])*operator[2, 1] result[0, 2] = (2 + operator[0, 0])*operator[0, 2] +",
"= math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step",
"Must be a whole number multiple of `time_step_integration`. Measured in s. state_init :",
"time.copy_to_host() state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results = Results(time, time_evolution_coarse,",
"expected spin projection (Bloch vector) over time. This is calculated just in time",
"a given time series of a quantum state. This :obj:`callable` is passed to",
"& 0 & -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0",
"See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" if not device: if cuda.is_available():",
"A text label that can be used for archiving. index : :obj:`int` A",
"(field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] -",
"Lie Product Formula, .. math:: \\\\exp(A + B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right)",
"of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state of the spin system over time,",
"magnetic field, for example), and the fourth entry being the amplitude of the",
"Formula, .. math:: \\\\exp(A + B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For",
"combination of a subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A &= -i(x",
"j}\\\\\\\\ &= \\\\begin{cases} 1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: * **operator**",
"the target device) used in the integrator. These device functions are compiled for",
"max_registers): def jit_host(func): return nb.njit(template, parallel = True)(func) return jit_host self.jit_host = jit_host",
"is 0. .. math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\",
"the spin projection operator in the z direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64`",
"Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"\\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])\", max_registers) def",
"1] = 0 operator[2, 1] = 0 operator[0, 2] = 0 operator[1, 2]",
"matrix to be filled with the result of the product. adjoint(operator) : :obj:`callable`",
"filled with the result of the product. adjoint(operator) : :obj:`callable` Takes the hermitian",
"== 1: time_index = cuda.grid(1) elif device_index == 1: time_index = roc.get_global_id(1) if",
"number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block",
"float64, float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):",
"as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix",
"0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension",
"state_index) The state (wavefunction) of the spin system in the lab frame, for",
"if device_index == 0: for time_index in nb.prange(spin.shape[0]): if dimension == 2: spin[time_index,",
"left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] result[0, 1] = left[0, 0]*right[0, 1]",
"GPU occupancy, meaning more threads run concurrently, at the expense of fewer resgiters",
"3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration =",
"= operator[0, 1] result[1, 1] = operator[1, 1] @jit_device def set_to_one(operator): operator[0, 0]",
"\\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y",
"\"\"\" Use pure python interpreted code for the integrator, ie, don't compile the",
"run on a single CPU core. .. note :: To use this device",
"field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) # eq = field_sample[3]/(6*precision) # eq =",
"& 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0 \\\\\\\\ i",
"s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y",
"3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2",
"elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128)",
"to the approximate matrix (:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\"",
"+= 1 result[2, 2] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):",
":])\") def append_exponentiation(field_sample, time_evolution_fine, time_evolution_coarse): if device_index == 0: time_evolution_old = np.empty((dimension, dimension),",
"rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration if",
"will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block",
":: To use this device option, the user defined field function must be",
"= jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64,",
"c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z +",
"+ conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0]",
"B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes the",
"will be an optimal value of `max_registers` for each model of GPU running",
"= math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2,",
"0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0,",
"with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0 \\\\\\\\",
"time for a given time series of a quantum state. Used to calculate",
"to use for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when",
"adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[2,",
"to. set_to_one(operator) : :obj:`callable` Make a matrix the multiplicative identity, ie, :math:`1`. ..",
"1] + w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if",
"2] result[1, 2] = operator[1, 2] result[2, 2] = operator[2, 2] @jit_device def",
"`spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details. use_rotating_frame : :obj:`bool`",
"result[0, 1] = sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) - 1 result[2, 1] =",
":obj:`float` The time step used within the integration algorithm. In units of s.",
"if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2,",
"= (B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"result[1, 1] += 1 result[2, 2] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample,",
"Use the stepwise time evolution operators in succession to find the quantum state",
"2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3])",
"in time using the JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates the expected",
"of the spin projection operator in the z direction. Returns ------- results :",
"the experiment is to finish at. Measured in s. The duration of the",
"math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &=",
"Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index",
"= get_time_evolution self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init):",
"cisz = math.cos(z) + 1j*math.sin(z) # result[0, 0] = (cx*cy - 1j*sx*sy)/cisz #",
"jit get_field function into a cuda device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time",
"= (y - 1j*x)*s result[0, 1] = -(y + 1j*x)*s result[1, 1] =",
"compiling the integrator, where higher level objects like enums cannot be interpreted. \"\"\"",
"1]*right[1, 1] + left[1, 2]*right[2, 1] result[2, 1] = left[2, 0]*right[0, 1] +",
"= (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin =",
"def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :],",
"self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid,",
"+ q J_q), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 &",
"+ 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :],",
"= 0 operator[0, 1] = 0 operator[1, 1] = 0 operator[2, 1] =",
"result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[0, 1] =",
"* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to",
"J_y + z J_z), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0",
"(exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use an analytic",
"float64, float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):",
"The time offset that the experiment is to start at, and the time",
"2), dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 2, 2),",
"math.sqrt(2) sqrt3 = math.sqrt(3) class SpinQuantumNumber(Enum): \"\"\" Options for the spin quantum number",
"(start/end)) - The time offset that the experiment is to start at, and",
"J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x)",
"trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation to use for matrix exponentiation",
"bias values, by calling this method multiple times, each time varying `sweep_parameter`. time_start",
"self.jit_host = jit_host def jit_device(func): return roc.jit(device = True)(func) self.jit_device = jit_device def",
"= math.tau*time_step_integration*field_sample[0, 1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if dimension > 2: field_sample[0,",
"field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2,",
"component of the field, which increases the accuracy of the output since the",
"= (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2]",
"the exponentiation is to be written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a",
"# cisz = math.cos(z - q/3) + 1j*math.sin(z - q/3) # result[0, 2]",
"def jit_device(func): return func self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return func",
"of a rotating wave approximation, a technique used to get approximate analytic solutions",
"= transform_frame_lab get_field_jit = jit_device(get_field) if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64,",
"- left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left, right): return conj(left[0])*right[0]",
"J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y &=",
"meaning more threads run concurrently, at the expense of fewer resgiters being avaliable",
"sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) eq =",
"vectors is 0. .. math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle l, r",
"= field_sample[1]/precision # z = field_sample[2]/precision # q = field_sample[3]/precision # cx =",
"& 0 & -i \\\\\\\\ 0 & i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &=",
"iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau}",
"-\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray`",
"\\\\end{align*} For spin one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) +",
"Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator will",
"* **z** (:class:`numpy.complex128`) - The complex number to take the absolute value of.",
"to use for matrix exponentiation within the integrator. Parameters ---------- value : :obj:`str`",
"of a an evaluation of the integrator. Attributes ---------- time : :obj:`numpy.ndarray` of",
": :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of the spin system",
"Dimension of the hilbert space the states with this spin belong to. label",
"without the need for slow recompilation. For example, if the `sweep_parameter` is used",
"get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64,",
"3, 3), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in",
"must have three arguments: * **time_sample** (:obj:`float`) - the time to sample the",
"for each specific GPU model. This means that if more registers are allocated",
"math.cos(eq) + 1j*math.sin(eq) # Ca = 1 # Sa = a/2 # ca",
"result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1]",
"(time.size + (self.threads_per_block - 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration,",
"use_rotating_frame: # Take out of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index,",
"> 0: if device_index == 1: time_index = cuda.grid(1) elif device_index == 1:",
"= conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result,",
"be calculated as .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y",
"= (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] result[1, 0] =",
"for a given time series of a quantum state. This :obj:`callable` is passed",
"field_sample[2]/(2*precision) # ez = 1 + 1j*ez # eq = field_sample[3]/(6*precision) # eq",
"= self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin raise AttributeError(\"{} has no attribute called",
"J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then",
"(1)) The time values for when the experiment is to start and finishes.",
"+ left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 +",
"operator[2, 1] result[0, 2] = operator[0, 2] result[1, 2] = operator[1, 2] result[2,",
"time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers = max_registers) def get_spin(state,",
"left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1] result[1, 1] = left[1, 0]*right[0, 1]",
"state. Parameters ---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state",
"r \\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: * **left**",
"of the spin projection operator in the z direction. spin : :obj:`numpy.ndarray` of",
"(time_index, y_index, x_index)) - The evaluated time evolution operator between each time step.",
"= np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 2, 2),",
"spatial_direction) The expected spin projection (Bloch vector) over time. \"\"\" if device.index ==",
"GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying might be able to",
"threads_per_block) conj = utilities.conj complex_abs = utilities.complex_abs norm2 = utilities.norm2 inner = utilities.inner",
"increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\"",
"0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] result[1, 1]",
"0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] result[0, 1]",
"options and more details. get_time_evolution_raw : :obj:`callable` The internal function for evaluating the",
"1]*operator[1, 2] + operator[0, 2]*operator[2, 2] result[1, 2] = operator[1, 0]*operator[0, 2] +",
"index): super().__init__() self._value_ = value self.index = index ANALYTIC = (\"analytic\", 0) \"\"\"",
"np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the exponential if exponentiation_method_index ==",
"bra_state_index, ket_state_index) Time evolution operator (matrix) between the current and next timesteps, for",
"left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2] + left[1, 2]*right[2, 2] result[2, 2]",
"run. For example, it is used to sweep over dressing frequencies during the",
"It must have three arguments: * **time_sample** (:obj:`float`) - the time to sample",
"matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount < 0: #",
"time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding)",
"quantum state of the spin system over time. Parameters ---------- sweep_parameter : :obj:`float`",
"time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype =",
"systems. \"\"\" ONE = (1, 3, \"one\") \"\"\" For three level systems. \"\"\"",
"step. The equivalent of the trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation",
"left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left, right): return conj(left[0])*right[0] +",
"integrator will on average take smaller steps. .. note :: The use of",
"A reference number, used when compiling the integrator, where higher level objects like",
"available for use with spin half systems. Will not work with spin one",
"r y /= r z /= r c = math.cos(r/2) s = math.sin(r/2)",
"Measured in s. * **time_step_output** (:obj:`float`) - The sample resolution of the output",
"used for testing. Note that one extra register per thread is always added",
"and the output state in given out of the rotating frame. One can,",
":class:`numpy.complex128`, (y_index, x_index)) - An array to write the resultant adjoint to. matrix_exponential_analytic(field_sample,",
"Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta}",
"= np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif device == Device.CUDA: spin = cuda.device_array((state.shape[0],",
"is referenced by the user. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number))",
"that `spinsim` was designed for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned",
"roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] #",
"1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0 \\\\\\\\",
"def jit_device_template(func): return cuda.jit(template, device = True, inline = True)(func) return jit_device_template self.jit_device_template",
"# ez = 1 + 1j*ez # eq = field_sample[3]/(6*precision) # eq =",
"1]*right[1, 0] + left[1, 2]*right[2, 0] result[2, 0] = left[2, 0]*right[0, 0] +",
":obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int` The size of",
"x_index)) - An array to write the resultant adjoint to. matrix_exponential_analytic(field_sample, result) :",
"(:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the field. This is a",
"result[0, 1] = Sa/ep result[1, 1] = Ca*ez - 1 if device_index ==",
"= Ca/ez - 1 result[1, 0] = Sa*ep result[0, 1] = Sa/ep result[1,",
"1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1] result[0, 2] = left[0,",
"core. .. note :: To use this device option, the user defined field",
"self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output,",
"0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be calculated as ..",
"`time_coarse`. In units of s. Determines the sample rate of the outputs `time_coarse`",
"= math.cos(x) # sx = math.sin(x) # cy = math.cos(y) # sy =",
"for all options and more details. get_time_evolution_raw : :obj:`callable` The internal function for",
"&\\\\equiv \\\\langle l, r \\\\rangle\\\\\\\\ l \\\\cdot r &= \\\\sum_i (l_i)^* r_i \\\\end{align*}",
"# z = field_sample[2]/precision # q = field_sample[3]/precision # cx = math.cos(x) #",
"1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[2,",
"+ q/3) # result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy) # result[1,",
"the states with this spin belong to. label : :obj:`str` A text label",
"return results @staticmethod @nb.njit def get_state(state_init, state, time_evolution): \"\"\" Use the stepwise time",
"True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device = True,",
"compile the integrator to run on a single CPU core. .. note ::",
"the user. Modifies the field function so the integrator can be used for",
"step for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave,",
"inner(left, right) : :obj:`callable` The inner (maths convention dot) product between two complex",
"of. Returns * **az** (:class:`numpy.float64`) - The absolute value of z. norm2(z) :",
"if more registers are allocated than are available for the GPU model, the",
"or spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int` The size of each",
"experiment is to start at, and the time that the experiment is to",
"* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from.",
"each element of `time_coarse`. In units of s. Determines the sample rate of",
"1 result[1, 1] += 1 result[2, 2] += 1 # @jit_device # def",
"index ANALYTIC = (\"analytic\", 0) \"\"\" Analytic expression of the matrix exponential. For",
"thread is always added to the number specified for control, so really this",
"= operator[0, 0] result[1, 0] = operator[1, 0] result[2, 0] = operator[2, 0]",
"cisz*(sy - 1j*cy*sx)/sqrt2 # cisz = math.cos(z - q/3) + 1j*math.sin(z - q/3)",
"and spin calculation functions of the simulator. Parameters ---------- get_field : :obj:`callable` A",
"(:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state` was evaluated at. *",
"operator[1, 0] = 0 operator[2, 0] = 0 operator[0, 1] = 0 operator[1,",
"state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif device_index > 0: if device_index == 1:",
":math:`0`. .. math:: \\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray`",
"conj(operator[2, 1]) result[2, 2] = conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass",
"64. Modifying might be able to increase execution time for different GPU models.",
":], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6 w1",
"and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution operator",
"This function must be compilable for the device that the integrator is being",
":class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply by. * **right** (:class:`numpy.ndarray`",
"(2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index,",
"s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y",
"for some information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum",
"operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] result[0, 1] = (2",
"time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64,",
"left[1, 2]*right[2, 1] result[2, 1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1]",
"jit_device def jit_device_template(template): def jit_device_template(func): return roc.jit(template, device = True)(func) return jit_device_template self.jit_device_template",
"time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = time_fine + time_step_integration -",
"half. Switching to a Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1",
"result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) -",
"complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real",
"complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): #",
"State = time evolution * previous state for x_index in nb.prange(state.shape[1]): state[time_index, x_index]",
"0] result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1,",
"for when the experiment is to start and finishes. In units of s.",
"# Take out of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0,",
"\"\"\" Compiles the integrator and spin calculation functions of the simulator. Parameters ----------",
"Options for the spin quantum number of a system. Parameters ---------- value :",
"- 1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF:",
"/= rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2,",
"set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[2, 0] = 0",
"= math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] =",
"\\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray`",
"+= time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64,",
"# x = field_sample[0]/precision # y = field_sample[1]/precision # z = field_sample[2]/precision #",
"time evolution opperator. Parameters ---------- sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64`",
"@jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output,",
"complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1,",
"parallel. .. note :: To use this device option, the user defined field",
"math.cos(eq) + 1j*math.sin(eq) result[0, 0] = Ca/ez - 1 result[1, 0] = Sa*ep",
"X.imag field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\")",
"with the result of the product. adjoint(operator) : :obj:`callable` Takes the hermitian adjoint",
"except: print(\"\\033[31mspinsim error: numba.cuda could not jit get_field function into a cuda device",
"\"\"\" The results of a an evaluation of the integrator. Attributes ---------- time",
"&= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) &",
"value of the field. This is a four dimensional vector, with the first",
":math:`r = \\\\sqrt{x^2 + y^2 + z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`,",
"\"\"\" self.time = time self.time_evolution = time_evolution self.state = state self.spin_calculator = spin_calculator",
"# from . import utilities from enum import Enum import numpy as np",
"frame, using the rating wave approximation: just define `get_field()` with field functions that",
"roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index",
"q for spin one) respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1,",
"math:: \\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: * **z**",
"+ 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample +=",
"0: # hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/precision",
"expense of fewer resgiters being avaliable to each thread, meaning slower memory must",
"0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0 & -1 \\\\end{pmatrix}",
"meaning slower memory must be used. Thus, there will be an optimal value",
"This means that if more registers are allocated than are available for the",
": :obj:`callable` The internal function for evaluating the time evolution operator in parallel.",
"2] = operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] + operator[1,",
"copy from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to",
"dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine",
":obj:`callable` Make a matrix the additive identity, ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i,",
"running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying might",
"a/precision Ca = math.cos(a/2) Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez = math.cos(ez)",
"&\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`.",
"if device_index == 1: time_index = cuda.grid(1) elif device_index == 1: time_index =",
"to compile the integrator to run on a single CPU core. .. note",
"self.norm2 = norm2 self.inner = inner self.set_to = set_to self.set_to_one = set_to_one self.set_to_zero",
"for. In units of s. This is an output, so use an empty",
"(l_i)^* r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector",
"each thread block (workgroup), in terms of the number of threads (workitems) they",
"float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample",
"+ operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2] @jit_device def adjoint(operator,",
"text label that can be used for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\"",
"roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block",
"# temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1),",
"a > 0: ep = (field_sample[0] + 1j*field_sample[1])/a else: ep = 1 a",
"iy J_y - iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y +",
"ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq =",
"field, which increases the accuracy of the output since the integrator will on",
"integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE:",
"or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator",
"1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0] result[0, 1] = (2 +",
"y and z respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"== Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block -",
"0 result[0, 1] = 0 result[1, 1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result,",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to left multiply in the inner",
"= None self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff,",
"== 3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] +",
":obj:`IntegrationMethod` for more details. trotter_cutoff : :obj:`int` The number of squares made by",
"option is set to :obj:`True` - no such approximations are made, and the",
"== 1: time_index = roc.get_global_id(1) if time_index < spin.shape[0]: if dimension == 2:",
"* **d** (:class:`numpy.complex128`) - The inner product of l and r. set_to(operator, result)",
"numba.cuda could not jit get_field function into a cuda device function.\\033[0m\\n\") raise time_evolution_coarse",
"The vector to left multiply in the inner product. * **right** (:class:`numpy.ndarray` of",
"+ left[1, 2]*right[2, 0] result[2, 0] = left[2, 0]*right[0, 0] + left[2, 1]*right[1,",
"if a > 0: ep = (field_sample[0] + 1j*field_sample[1])/a else: ep = 1",
"sampling the field from the end of the time step. The equivalent of",
"z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The values of",
"on a single CPU core. .. note :: To use this device option,",
"outputs `time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time",
"being compiled for. See :class:`Device` for more information and links. spin_quantum_number : :obj:`SpinQuantumNumber`",
"if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff)",
"= X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating",
"= left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0] result[2,",
"The number of squares to make to the approximate matrix (:math:`\\\\tau` above). \"\"\"",
"= math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :],",
"for the chosen target device on construction of the object. Attributes ---------- conj(z)",
": :obj:`int` The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER`",
"@jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration,",
"1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device",
"= value self.dimension = dimension self.label = label HALF = (1/2, 2, \"half\")",
"GPU, for each specific GPU model. This means that if more registers are",
"dtype = np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end,",
"2] = sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) - 1 if device_index == 0:",
"math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1]",
"x_index in nb.prange(state.shape[1]): state[time_index, x_index] = 0 if time_index > 0: for z_index",
":], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP:",
"value of. Returns * **az** (:class:`numpy.float64`) - The absolute value of z. norm2(z)",
"# cisz = math.cos(z + q/3) - 1j*math.sin(z + q/3) # result[0, 0]",
"+ z J_z), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 &",
"the end of the time step. The equivalent of the trapezoidal method. \"\"\"",
"used for many experiments, without the need for slow recompilation. For example, if",
"0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0]",
"each time step. See :ref:`architecture` for some information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128`",
"1j*sx*sy)*cisz # if device_index == 0: # temporary = np.empty((2, 2), dtype =",
"sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq)",
"is to finish at. Measured in s. * **time_step_integration** (:obj:`float`) - The integration",
"== 1: # temporary = cuda.local.array((2, 2), dtype = np.complex128) # elif device_index",
"the exponent is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(3)`, being,",
"x_index)) - The matrix to copy to. set_to_one(operator) : :obj:`callable` Make a matrix",
"Take out of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0]",
"over time. Parameters ---------- sweep_parameter : :obj:`float` The input to the `get_field` function",
"threads_per_block = 64, max_registers = 63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ----------",
"Q &= 2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is",
"& \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y +",
"np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device ==",
"def cross(left, right, result): result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0] -",
"spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) //",
"\\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 -",
"self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could not jit",
"(y_index, x_index)) - The matrix to copy from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z",
"it is referenced by the user. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index,",
"result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2 # cisz = math.cos(z - q/3) +",
"np.empty(sample_index_end, dtype = np.complex128) elif device_index == 1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype",
":obj:`float` The time difference between each element of `time_coarse`. In units of s.",
"c_X c_Y & \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z",
"that if more registers are allocated than are available for the GPU model,",
"\"\"\" CUDA = (\"cuda\", 1) \"\"\" Use the :func:`numba.cuda.jit()` LLVM compiler to compile",
"stepwise time evolution opperator. Parameters ---------- sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray` of",
"= conj(operator[2, 1]) result[2, 2] = conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff):",
"y and z (and q for spin one) respectively, as described above. *",
"0]*right[0, 1] + left[0, 1]*right[1, 1] result[1, 1] = left[1, 0]*right[0, 1] +",
"hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/precision # y",
"return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device = jit_device def",
"dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration",
"integration. That is, whether the integrator is compiled for a CPU or GPU.",
"cuda.local.array((2, 2), dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 2,",
"some information. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector) over",
"conj = utilities.conj complex_abs = utilities.complex_abs norm2 = utilities.norm2 inner = utilities.inner set_to",
"details. get_time_evolution_raw : :obj:`callable` The internal function for evaluating the time evolution operator",
"maximum number for the whole GPU, for each specific GPU model. This means",
"field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq = math.cos(eq) +",
"Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC`",
"multiple times, each time varying `sweep_parameter`. time_start : :obj:`float` The time offset that",
"set_to_zero self.matrix_multiply = matrix_multiply self.adjoint = adjoint self.matrix_exponential_analytic = matrix_exponential_analytic self.matrix_exponential_lie_trotter = matrix_exponential_lie_trotter",
"= int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index == 0: time = np.empty(time_index_max, np.float64) time_evolution_coarse",
"Utilities: \"\"\" A on object that contains definitions of all of the device",
"jit_device_template = device.jit_device_template device_index = device.index dimension = spin_quantum_number.dimension lie_dimension = dimension +",
"expression of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER",
"to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there just in",
"iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y -",
"+ is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ}",
"projection (Bloch vector) over time. \"\"\" self.time = time self.time_evolution = time_evolution self.state",
"it is used to sweep over dressing frequencies during the simulations that `spinsim`",
"2]*operator[2, 0] result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] +",
"systems** Assumes the exponent is an imaginary linear combination of a subspace of",
"left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] result[1, 1] = left[1, 0]*right[0, 1]",
"= True)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"roc\": def jit_host(template,",
"(time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time. \"\"\" self.time",
"an integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points",
"0 precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a > 0:",
"dimension > 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration",
"elif self.device == Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension),",
":obj:`Device.CUDA` as the target device, and can be modified to increase the execution",
"with :math:`r = \\\\sqrt{x^2 + y^2 + z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of",
"use of a rotating wave approximation, a technique used to get approximate analytic",
"J_y - iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y)",
"options and more details. threads_per_block : :obj:`int` The size of each thread block",
"\\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential",
"\\\\end{align*} with .. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &=",
"spin_calculator(state): \"\"\" Calculates the expected spin projection (Bloch vector) over time for a",
"+= 1 result[1, 1] += 1 result[2, 2] += 1 # @jit_device #",
"passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed there just",
"self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template =",
"== 0: # temporary = np.empty((3, 3), dtype = np.complex128) # elif device_index",
"complex vector. .. math:: \\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)}",
"increases GPU occupancy, meaning more threads run concurrently, at the expense of fewer",
": :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index) The evaluated time evolution operator between",
":class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution operator (matrix) between the current and next",
"can be used for archiving. \"\"\" def __init__(self, value, dimension, label): super().__init__() self._value_",
"device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_old =",
"1 & 0 \\\\\\\\ 1 & 0 & 1 \\\\\\\\ 0 & 1",
"self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return roc.jit(template, device = True)(func) return",
"the GPU is said to have less occupancy. Lowering the value increases GPU",
"0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1] result[2,",
"\"\"\" jit_device = device.jit_device device_index = device.index @jit_device def conj(z): return (z.real -",
"0.5*cisz*(cx + cy - 1j*sx*sy) # result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2 #",
"exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\"",
"= roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :]",
"1]) result[2, 2] = conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device",
"2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0]",
"bra_state_index, ket_state_index) The evaluated time evolution operator between each time step. See :ref:`architecture`",
"- The integration time step. Measured in s. * **time_step_output** (:obj:`float`) - The",
"= roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for",
"result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2,",
"1] + left[1, 2]*right[2, 1] result[2, 1] = left[2, 0]*right[0, 1] + left[2,",
"s = math.sin(r/2) result[0, 0] = c - 1j*z*s result[1, 0] = (y",
"operator[0, 0] = 1 operator[1, 0] = 0 operator[2, 0] = 0 operator[0,",
"temporary, result) self.conj = conj self.complex_abs = complex_abs self.norm2 = norm2 self.inner =",
"self.conj = conj self.complex_abs = complex_abs self.norm2 = norm2 self.inner = inner self.set_to",
"np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index == 2: time_evolution_fine_group =",
"spin system dynamics. This is not done when this option is set to",
"a an evaluation of the integrator. Attributes ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64`",
"can be used for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free, fourth",
"the quadratic shift (only appearing, and required, in spin one systems). .. note::",
"referenced by the user. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) -",
"cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy) #",
"for use on the target device) used in the integrator. These device functions",
"jit_host(template, max_registers): def jit_host(func): return nb.njit(template, parallel = True)(func) return jit_host self.jit_host =",
"= Sa/ep result[1, 1] = Ca*ez - 1 if device_index == 0: temporary",
"0: for z_index in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index",
"(:obj:`Device.ROC`). Defaults to 64. Modifying might be able to increase execution time for",
"of. Returns * **cz** (:class:`numpy.complex128`) - The conjugate of z. complex_abs(z) : :obj:`callable`",
"so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`.",
"not jit get_field function into a roc device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host()",
"`get_field`, then one can run many simulations, sweeping through bias values, by calling",
"\"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free, fourth order Magnus based integrator. \"\"\"",
"compiled for. See :class:`Device` for more information and links. spin_quantum_number : :obj:`SpinQuantumNumber` The",
"SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end =",
"operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2]",
"+= time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = time_fine + time_step_integration - time_coarse",
"math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if",
"half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation using the",
"warning!!!\\n_attempting to use an analytic exponentiation method outside of spin half. Switching to",
"time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) rotating_wave = field_sample[0, 2] if dimension",
"time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device == Device.CUDA: time = cuda.device_array(time_index_max, np.float64)",
"time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*field_sample[1, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2]",
"spin projection operator in the z direction. spin_calculator : :obj:`callable` Calculates the expected",
"for the whole GPU, for each specific GPU model. This means that if",
"+ 1j*sx*cy)*cisz # result[1, 1] = (cx*cy + 1j*sx*sy)*cisz # if device_index ==",
"field_sample[0] y = field_sample[1] z = field_sample[2] r = math.sqrt(x**2 + y**2 +",
"2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :,",
"math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2])",
"matrix which the result of the exponentiation is to be written to. matrix_exponential_lie_trotter(field_sample,",
"of GPU running :mod:`spinsim`, balancing more threads vs faster running threads, and changing",
"in :ref:`overview_of_simulation_method`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`,",
"using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(spin.shape[0]): if dimension",
"True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers = 63):",
"your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" utilities = Utilities(spin_quantum_number,",
"time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :])",
"2]*operator[2, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1,",
"1j*z*s else: result[0, 0] = 1 result[1, 0] = 0 result[0, 1] =",
"1 + 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0] = sa*eq*ep/ez",
"2] result[1, 2] = operator[1, 0]*operator[0, 2] + (2 + operator[1, 1])*operator[1, 2]",
"= np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device",
"1j*z*s result[1, 0] = (y - 1j*x)*s result[0, 1] = -(y + 1j*x)*s",
"if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2,",
"for matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is",
"result[1, 2] = operator[1, 2] result[2, 2] = operator[2, 2] @jit_device def set_to_one(operator):",
":obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details. use_rotating_frame : :obj:`bool` Whether or not to",
"Nvidia's official explanation. \"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj complex_abs",
"Integrates the time dependent Schroedinger equation and returns the quantum state of the",
"@jit_host(\"(complex128[:, :], float64[:, :])\", max_registers = max_registers) def get_spin(state, spin): \"\"\" Calculate each",
"ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*} Parameters: * **operator**",
"time_step_integration, time_step_output, time_evolution_coarse) elif self.device == Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse =",
"matrix exponentiation within the integrator. Parameters ---------- value : :obj:`str` A text label",
"Calculates the expected spin projection (Bloch vector) over time for a given time",
"a complex number. .. math:: \\\\begin{align*} |a + ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\",
"def jit_host(template, max_registers): def jit_host(func): return nb.njit(template, parallel = True)(func) return jit_host self.jit_host",
"nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cuda\": def jit_host(template, max_registers):",
"1] = operator[0, 1] result[1, 1] = operator[1, 1] @jit_device def set_to_one(operator): operator[0,",
"jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu_single\": def jit_host(template, max_registers): def jit_host(func):",
"@jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[2, 0]",
"field strength in `get_field`, then one can run many simulations, sweeping through bias",
"np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output,",
"rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2,",
"1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter,",
"1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif device == Device.ROC:",
"= jit_device_template elif value == \"cpu\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template,",
"def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0,",
"time. \"\"\" def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame",
"0] = 0 operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1]",
"dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2,",
"time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\")",
"temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :]",
"rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2]",
"A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^* \\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters:",
"norm of z. inner(left, right) : :obj:`callable` The inner (maths convention dot) product",
"In units of s. Determines the sample rate of the outputs `time_coarse` and",
"i & 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0 &",
"if dimension > 2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *=",
"2 norm of z. inner(left, right) : :obj:`callable` The inner (maths convention dot)",
"if not device: if cuda.is_available(): device = Device.CUDA else: device = Device.CPU self.threads_per_block",
"0, 1] /= rotating_wave_winding[0] if dimension > 2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0]",
"c = math.cos(r/2) s = math.sin(r/2) result[0, 0] = c - 1j*z*s result[1,",
"2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2] result[1, 2] = left[1,",
"matrix to copy to. set_to_one(operator) : :obj:`callable` Make a matrix the multiplicative identity,",
"exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end = 4",
"device.jit_device_template device_index = device.index dimension = spin_quantum_number.dimension lie_dimension = dimension + 1 #",
"utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host",
"def jit_host(func): return nb.njit(template, parallel = True)(func) return jit_host self.jit_host = jit_host def",
"succession to find the quantum state timeseries of the 3 level atom. Parameters",
"right multiply by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix",
"s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated time",
"1] = math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2:",
"memory) to each thread, out of a maximum number for the whole GPU,",
":]) time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample)",
"to start and finishes. In units of s. time_step_integration : :obj:`float` The time",
"2, 2), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] # for",
"1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if dimension > 2:",
"in terms of the eigenstates of the spin projection operator in the z",
"== 0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)",
"s. * **time_step_integration** (:obj:`float`) - The integration time step. Measured in s. *",
"func self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return func return jit_device_template self.jit_device_template",
"Use the :func:`numba.cuda.jit()` LLVM compiler to compile the integrator to run on an",
"per thread when using :obj:`Device.CUDA` as the target device, and can be modified",
"float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] =",
"= True)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device =",
"get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine - time_coarse rotating_wave_winding[0]",
"0 & -2 & 0 \\\\\\\\ 0 & 0 & 1 \\\\end{pmatrix} \\\\end{align*}",
"result[2, 1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2,",
"# result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx +",
":], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample =",
"state[time_index, x_index] = 0 if time_index > 0: for z_index in range(state.shape[1]): state[time_index,",
"- i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X",
"direction. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector) over time",
"The integration time step. Measured in s. time_step_output : :obj:`float` The sample resolution",
"+ c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z",
"on object that contains definitions of all of the device functions (functions compiled",
"+ left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1] result[1, 1] = left[1, 0]*right[0,",
"= Ca*ez - 1 if device_index == 0: temporary = np.empty((2, 2), dtype",
"math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an integer multiple of",
"self._value_ = value self.index = index ANALYTIC = (\"analytic\", 0) \"\"\" Analytic expression",
"the integrator, where higher level objects like enums cannot be interpreted. \"\"\" def",
"1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])\", max_registers)",
"convention dot) product between two complex vectors. .. note:: The mathematics definition is",
":obj:`callable` The inner (maths convention dot) product between two complex vectors. .. note::",
"+ left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0] result[1, 0] = left[1, 0]*right[0,",
"device_index == 1: time_index = roc.get_global_id(1) if time_index < spin.shape[0]: if dimension ==",
"+ (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i",
"def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0, 0]",
"math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = ((time_fine",
"0] = 0.5*cisz*(cx - cy - 1j*sx*sy) # cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3)",
"1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim",
"field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1]",
"1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1] result[1, 1] = left[1,",
"utilities.inner set_to = utilities.set_to set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array to write the resultant adjoint",
"thread, out of a maximum number for the whole GPU, for each specific",
"= field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) # cx = math.cos(x) # sx =",
": :obj:`float` The sample resolution of the output timeseries for the state. Must",
"y^2 + z^2}`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) - The",
"jit_host = device.jit_host jit_device = device.jit_device jit_device_template = device.jit_device_template device_index = device.index dimension",
"time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1,",
"\\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i",
"3 sample_index_end = 4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end =",
"def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[0, 1] =",
"matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1,",
"1j*sx*sy) # result[1, 0] = cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx",
"temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) result[0, 0]",
"operator[0, 1]*operator[1, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1,",
"threads_per_block = 64, max_registers = 63): \"\"\" Compiles the integrator and spin calculation",
"dressing frequencies during the simulations that `spinsim` was designed for. * **field_sample** (:class:`numpy.ndarray`",
"The time that the experiment is to finish at. Measured in s. The",
":: The use of a rotating frame is commonly associated with the use",
"evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates the time dependent Schroedinger",
"of the hilbert space the states with this spin belong to. label :",
"system. threads_per_block : :obj:`int` The size of each thread block (workgroup), in terms",
"for describing which method is used during the integration. Parameters ---------- value :",
"target device) used in the integrator. These device functions are compiled for the",
"= True, inline = True)(func) return jit_device_template self.jit_device_template = jit_device_template elif value ==",
"to compile the integrator to run on all CPU cores, in parallel. ..",
"(index)) - The vector to right multiply in the inner product. Returns *",
"3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state),",
"# For every fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine,",
"complex number. .. math:: \\\\begin{align*} (a + ib)^* &= a - ib\\\\\\\\ a,",
"J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}}",
"0 & 0 & 1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated",
"1] + left[2, 2]*right[2, 1] result[0, 2] = left[0, 0]*right[0, 2] + left[0,",
"\\\\end{align*} Once :math:`T` is calculated, it is then recursively squared :math:`\\\\tau` times to",
"exponential can be calculated as .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x -",
"in spin one systems). .. note:: This function must be compilable for the",
"compilable python features, and `Supported Numpy features`_ for compilable numpy features. \"\"\" CPU",
"compatible GPU, in parallel. .. warning :: Work in progress, not currently functional!",
"result[1, 1] = c + 1j*z*s else: result[0, 0] = 1 result[1, 0]",
"frame. integration_method : :obj:`IntegrationMethod` Which integration method to use in the integration. Defaults",
"one sampling the field from the end of the time step. The equivalent",
":func:`Simulator.evaluate()`, and is executed there just in time if the `spin` property is",
"links. spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator will integrate",
"= 1 operator[1, 0] = 0 operator[2, 0] = 0 operator[0, 1] =",
"2] if dimension > 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine,",
"state (spin wavefunction) of the system at the start of the simulation. state",
"result[2, 2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2,",
"run many simulations, sweeping through bias values, by calling this method multiple times,",
"z = field_sample[2]/(2*precision) # cx = math.cos(x) # sx = math.sin(x) # cy",
"- The expected spin projection (Bloch vector) over time. \"\"\" self.time = time",
"math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:,",
"state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\"",
"evaluated time evolution operator between each time step. See :ref:`architecture` for some information.",
"1] = (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0,",
"field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample)",
"- state[time_index, 2].real**2 - state[time_index, 2].imag**2 return def spin_calculator(state): \"\"\" Calculates the expected",
"sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A",
"0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1] result[0, 2] =",
"= conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left, right):",
"steps, one sampling the field from the start of the time step, one",
"(LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"operator[1, 0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1] @jit_device",
"field_sample[1]*field_sample[1]) if a > 0: ep = (field_sample[0] + 1j*field_sample[1])/a else: ep =",
"0] + (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0] result[2, 0]",
"sweep_parameter : :obj:`float` The input to the `get_field` function supplied by the user.",
"the time step, one sampling the field from the end of the time",
"= field_sample[3]/precision # cx = math.cos(x) # sx = math.sin(x) # cy =",
"= ca*(eq*eq*eq*eq) - 1 result[2, 1] = sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1,",
"time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables if device_index == 0: time_evolution_fine =",
"& \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y +",
"X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] =",
"= np.empty((dimension, dimension), dtype = np.complex128) elif device_index == 1: time_evolution_old = cuda.local.array((dimension,",
"evaluated at. * **time_end_points** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (start/end)) - The time offset that",
"max_registers): def jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return",
"number is 64). Raising this value allocates more registers (fast memory) to each",
"**right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to right multiply in the",
"values, by calling this method multiple times, each time varying `sweep_parameter`. * **time_coarse**",
"this number is 64). Raising this value allocates more registers (fast memory) to",
"@jit_device def matrix_multiply(left, right, result): result[0, 0] = left[0, 0]*right[0, 0] + left[0,",
"used to define the bias field strength in `get_field`, then one can run",
"= math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq = math.cos(eq) + 1j*math.sin(eq) #",
"warning:: Only available for use with spin half systems. Will not work with",
":obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch vector) over time. This is",
"and right together, to be returned in result. .. math:: \\\\begin{align*} (LR)_{i,k} =",
"def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z):",
"operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2] result[2, 2] = operator[2, 0]*operator[0, 2]",
"1] result[1, 1] = operator[1, 1] @jit_device def set_to_one(operator): operator[0, 0] = 1",
"speed for a specific GPU model. Defaults to 63 (optimal for GTX1070, the",
"as, for large :math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy",
"to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0 if use_rotating_frame: time_sample = time_coarse[time_index]",
"A python function that describes the field that the spin system is being",
"the product. adjoint(operator) : :obj:`callable` Takes the hermitian adjoint of a matrix. ..",
"Initialise time evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0 if",
"(cx*sy -1j*sx*cy)/cisz # result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz # result[1, 1] =",
"select whether the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one",
"+ left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1] result[0, 2] = left[0, 0]*right[0,",
"0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run",
"must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and",
"over time. \"\"\" def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method = None,",
"np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on object that contains definitions of all of",
"= conj(operator[0, 1]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1])",
"= np.complex128) elif device_index == 1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128)",
"elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) temporary",
"# sx = math.sin(x) # cy = math.cos(y) # sy = math.sin(y) #",
"= field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision) eq = math.cos(eq)",
"\\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math::",
"np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] # for power_index in range(hyper_cube_amount): #",
"z direction. This removes the (possibly large) z component of the field, which",
"= np.empty((2, 2), dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((2,",
"from the end of the time step. The equivalent of the trapezoidal method.",
"= a/2 # ca = 1 # sa = -1j*a/sqrt2 # ez =",
"norm2 self.inner = inner self.set_to = set_to self.set_to_one = set_to_one self.set_to_zero = set_to_zero",
"= Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj complex_abs = utilities.complex_abs norm2 = utilities.norm2",
": :obj:`Device` The option to select which device will be targeted for integration.",
"= left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] result[1, 1] = left[1, 0]*right[0,",
"1 + 1j*ez # eq = field_sample[3]/(6*precision) # eq = 1 + 1j*eq",
"# matrix_multiply(temporary, temporary, result) self.conj = conj self.complex_abs = complex_abs self.norm2 = norm2",
"cross(left, right, result): result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2])",
"can be modified to increase the execution speed for a specific GPU model.",
"+ (2 + operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator, result): result[0, 0] =",
"device_index = device.index @jit_device def conj(z): return (z.real - 1j*z.imag) @jit_device def complex_abs(z):",
"an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points :",
"an analytic exponentiation method outside of spin half. Switching to a Lie Trotter",
"w0*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2, 2] =",
"time_step_integration : :obj:`float` The integration time step. Measured in s. time_step_output : :obj:`float`",
"J_x - iy J_y - iz J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x",
"result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2, 2] = (Ca*ez/eq)*(Ca*ez/eq) -",
"value, index): super().__init__() self._value_ = value self.index = index ANALYTIC = (\"analytic\", 0)",
"= math.cos(ez) + 1j*math.sin(ez) # eq = field_sample[3]/(6*precision) # eq = math.cos(eq) +",
"== \"cpu\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template, parallel = True)(func) return",
"integrator to run on an AMD ROCm compatible GPU, in parallel. .. warning",
"get_state(state_init, state, time_evolution): \"\"\" Use the stepwise time evolution operators in succession to",
"= np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] # for power_index in range(hyper_cube_amount):",
"0 \\\\\\\\ i & 0 & -i \\\\\\\\ 0 & i & 0",
"of the trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\" The implementation to use for",
"vector) over time for a given time series of a quantum state. This",
"multiple of `time_step_integration`. Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index,",
"object that contains definitions of all of the device functions (functions compiled for",
"operator[0, 2]*operator[2, 2] result[1, 2] = operator[1, 0]*operator[0, 2] + (2 + operator[1,",
"left and right together, to be returned in result. .. math:: \\\\begin{align*} (LR)_{i,k}",
"\\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) -",
"# for power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result)",
"64, max_registers = 63): \"\"\" Compiles the integrator and spin calculation functions of",
"dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index,",
"orthogonal vectors is 0. .. math:: \\\\begin{align*} l \\\\cdot r &\\\\equiv \\\\langle l,",
"= 64, max_registers = 63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field",
"cuda.grid(1) elif device_index == 1: time_index = roc.get_global_id(1) if time_index < spin.shape[0]: if",
"evaluation of the integrator. Attributes ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The",
"not an integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration)",
"math:: \\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"result): result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] result[1, 0]",
"= 3 sample_index_end = 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end",
"avaliable to each thread, meaning slower memory must be used. Thus, there will",
"the number specified for control, so really this number is 64). Raising this",
"= math.cos(z) + 1j*math.sin(z) # result[0, 0] = (cx*cy - 1j*sx*sy)/cisz # result[1,",
"input to the `get_field` function supplied by the user. Modifies the field function",
"its analytic form. .. warning:: Only available for use with spin half systems.",
": :obj:`callable` Make a matrix the additive identity, ie, :math:`0`. .. math:: \\\\begin{align*}",
"JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector)",
"the absolute value of. Returns * **az** (:class:`numpy.float64`) - The absolute value of",
"field from the start of the time step, one sampling the field from",
"operator[2, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 1 operator[2,",
"matrix_square_residual(temporary, result) result[0, 0] += 1 result[1, 1] += 1 result[2, 2] +=",
"time difference between each element of `time_coarse`. In units of s. Determines the",
"1] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0, 0])*operator[0, 0]",
"be returned in result. .. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*}",
"> 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration =",
"float64, float64, float64[:], complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points,",
"method is used during the integration. Parameters ---------- value : :obj:`str` A text",
"exponentiation_method = None, trotter_cutoff:int = 28, threads_per_block = 64, max_registers = 63): \"\"\"",
"+ z[1].real**2 + z[1].imag**2) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device",
"the use of a rotating wave approximation, a technique used to get approximate",
"to use an analytic exponentiation method outside of spin half. Switching to a",
"matrix (:math:`\\\\tau` above). \"\"\" def __init__(self, spin_quantum_number, device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number",
"\\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2 + y^2 + z^2}`. Parameters: * **field_sample**",
"None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim",
"with field functions that use the rotating wave approximation in the rotating frame.",
"dimension), dtype = np.complex128) elif device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension),",
"get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\", max_registers",
"results of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning:",
"3, 3), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] # for",
"https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable` A python function that describes the field",
"&= ((A)_{x,y})^* \\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator**",
"to get approximate analytic solutions of spin system dynamics. This is not done",
"rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse,",
"= 4**hyper_cube_amount # x = field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) # z =",
"on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults to 64. Modifying might be",
"a_i^2 + b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector",
"The values of x, y and z (and q for spin one) respectively,",
"and more details. threads_per_block : :obj:`int` The size of each thread block (workgroup),",
"state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 return def",
"different GPU models. max_registers : :obj:`int` The maximum number of registers allocated per",
".. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\",
"cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2 #",
"and `Supported Numpy features`_ for compilable numpy features. \"\"\" CPU = (\"cpu\", 0)",
"or GPU. Defaults to :obj:`Device.CUDA` if the system it is being run on",
"For spin half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2",
"result) result[0, 0] += 1 result[1, 1] += 1 # @jit_device # def",
"= 0 operator[0, 1] = 0 operator[1, 1] = 1 operator[2, 1] =",
"of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration - round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output",
"extra register per thread is always added to the number specified for control,",
"spin_quantum_number, device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select",
"left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0] result[2, 0] = left[2, 0]*right[0, 0]",
"max_registers : :obj:`int` The maximum number of registers allocated per thread when using",
":], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :],",
"over dressing frequencies during the simulations that `spinsim` was designed for. * **field_sample**",
"1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0, 1] /= rotating_wave_winding[0] if dimension",
"\\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state :",
"operator[0, 0] result[1, 0] = operator[1, 0] result[0, 1] = operator[0, 1] result[1,",
"time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates the time dependent Schroedinger equation and returns",
"transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit = jit_device(get_field) if integration_method ==",
"there will be an optimal value of `max_registers` for each model of GPU",
"J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ}",
"for slow recompilation. For example, if the `sweep_parameter` is used to define the",
"complex number. .. math:: \\\\begin{align*} |a + ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a,",
"with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1 &",
"LLVM compiler to compile the integrator to run on an Nvidia cuda compatible",
"product between two complex vectors. .. note:: The mathematics definition is used here",
"0 \\\\\\\\ 0 & 0 & 1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can",
"the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler",
"== ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim warning!!!\\n_attempting to use an analytic exponentiation",
"1 & 0 & 0 \\\\\\\\ 0 & 0 & 0 \\\\\\\\ 0",
"+ (2 + operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2] result[2, 2] =",
"**result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to. set_to_one(operator)",
":ref:`architecture` for some information. \"\"\" for time_index in range(state.shape[0]): # State = time",
"for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding)",
"# cy = math.cos(y) # sy = math.sin(y) # cisz = math.cos(z +",
"matrix to set to :math:`0`. matrix_multiply(left, right, result) : :obj:`callable` Multiply matrices left",
"of a system. Parameters ---------- value : :obj:`float` The numerical value of the",
"def get_field_integration_magnus_cf4(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1",
"functional! \"\"\" class Results: \"\"\" The results of a an evaluation of the",
"Parameters ---------- sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse",
"# temporary = cuda.local.array((2, 2), dtype = np.complex128) # elif device_index == 2:",
"in parallel. .. note :: To use this device option, the user defined",
"- iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx",
"compilable. See `Supported CUDA Python features`_ for compilable python features. \"\"\" ROC =",
"less occupancy. Lowering the value increases GPU occupancy, meaning more threads run concurrently,",
"for archiving. \"\"\" def __init__(self, value, dimension, label): super().__init__() self._value_ = value self.dimension",
":obj:`int` The maximum number of registers allocated per thread when using :obj:`Device.CUDA` as",
"function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features,",
"the z direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin",
"time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) +",
"2), dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((2, 2), dtype",
"= spin_calculator def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration, time_step_output, state_init): \"\"\" Integrates the",
"1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = ((time_fine + 0.5*time_step_integration*(1",
"get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 1: # Run",
"returned in result. .. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters:",
"= jit_device def jit_device_template(template): def jit_device_template(func): return roc.jit(template, device = True)(func) return jit_device_template",
":], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration,",
"= conj(operator[1, 1]) result[2, 1] = conj(operator[1, 2]) result[0, 2] = conj(operator[2, 0])",
"- round(time_step_output/time_step_integration)) > 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an integer multiple of time_step_integration.",
"x, y and z respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"1 result[1, 0] = Sa*ep result[0, 1] = Sa/ep result[1, 1] = Ca*ez",
"device on object constrution. Parameters: * **sweep_parameter** (:obj:`float`) - The input to the",
"= roc.shared.array((threads_per_block, 2, 2), dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for",
":obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state` was evaluated at. time_evolution :",
"threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator",
"0 operator[0, 1] = 0 operator[1, 1] = 1 @jit_device def set_to_zero(operator): operator[0,",
"@jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) # if hyper_cube_amount",
"made, and the output state in given out of the rotating frame. One",
"= spin.copy_to_host() elif device == Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid =",
"time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could not jit get_field",
"method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more",
"* **z** (:class:`numpy.complex128`) - The complex number to take the conjugate of. Returns",
"able to increase execution time for different GPU models. max_registers : :obj:`int` The",
"= (1.5 - sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2,",
"Make a matrix the additive identity, ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i, j}",
"spin projection (Bloch vector) over time. This is calculated just in time using",
":], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2,",
"in the lab frame, for each time sampled. Units of :math:`\\\\hbar`. This is",
"multiple times, each time varying `sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) -",
"sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so use an empty",
"time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1))",
":] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0] += 1",
"math.cos(z - q/3) + 1j*math.sin(z - q/3) # result[0, 2] = 0.5*cisz*(cx -",
"= cisz*(-1j*sx + cx*sy)/sqrt2 # result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy)",
"**For spin one systems** Assumes the exponent is an imaginary linear combination of",
"\\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be calculated as",
"time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an output, so use an",
"exponentiation method outside of spin half. Switching to a Lie Trotter method.\\033[0m\") exponentiation_method",
"on its analytic form. .. warning:: Only available for use with spin half",
"sweep_parameter) elif device_index == 1: # Run calculation for each coarse timestep in",
"- time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1,",
"time_sample = time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse",
"- cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx + cy + 1j*sx*sy) # if",
"rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0, 1] field_sample[0,",
"0) \"\"\" Analytic expression of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems",
"+ 1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1] =",
"self.time_evolution = time_evolution self.state = state self.spin_calculator = spin_calculator def __getattr__(self, attr_name): if",
"2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount",
"+ operator[1, 2]*operator[2, 2] result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1,",
"the eigenstates of the spin projection operator in the z direction. spin :",
"+ \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with ..",
"be used for archiving. index : :obj:`int` A reference number, used when compiling",
"if device_index == 0: temporary = np.empty((3, 3), dtype = np.complex128) elif device_index",
"use the rotating wave approximation in the rotating frame. integration_method : :obj:`IntegrationMethod` Which",
"set_to = utilities.set_to set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint",
"0] = 0 operator[0, 1] = 0 operator[1, 1] = 1 @jit_device def",
"2] + w1*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3]",
"(cx*cy - 1j*sx*sy)/cisz # result[1, 0] = (cx*sy -1j*sx*cy)/cisz # result[0, 1] =",
"spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter, time_start, time_end, time_step_integration,",
"able to increase execution time for different GPU models. \"\"\" jit_device = device.jit_device",
"0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0]",
"# ez = field_sample[2]/(2*precision) # ez = 1 + 1j*ez # eq =",
"roc device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() state = np.empty((time_index_max,",
"level systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options for describing which method is used",
"y J_y + z J_z), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{2}\\\\begin{pmatrix}",
"& -i \\\\\\\\ 0 & i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1",
"- 1, z_index] else: state[time_index, x_index] += state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 =",
"than are available for the GPU model, the GPU must run fewer threads",
"\"\"\" CPU = (\"cpu\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile",
"Nvidia cuda compatible GPU, in parallel. .. note :: To use this device",
"imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x",
"1j*sx*sy) # if device_index == 0: # temporary = np.empty((3, 3), dtype =",
":obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda compatible,",
"- The vector to left multiply in the inner product. * **right** (:class:`numpy.ndarray`",
"conj(operator[1, 1]) result[2, 1] = conj(operator[1, 2]) result[0, 2] = conj(operator[2, 0]) result[1,",
"= device self.get_time_evolution_raw = None self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame,",
"operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2] = 0 if use_rotating_frame: time_sample =",
"if integration_method == IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def",
"= roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1)) //",
"additive identity, ie, :math:`0`. .. math:: \\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*} Parameters:",
"== 1: # temporary = cuda.local.array((3, 3), dtype = np.complex128) # elif device_index",
"for the state. Must be a whole number multiple of `time_step_integration`. Measured in",
"a rotating wave approximation, a technique used to get approximate analytic solutions of",
"of a rotating frame is commonly associated with the use of a rotating",
"conjugate of. Returns * **cz** (:class:`numpy.complex128`) - The conjugate of z. complex_abs(z) :",
"an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if",
"be a whole number multiple of `time_step_integration`. Measured in s. state_init : :obj:`numpy.ndarray`",
"temporary, result) result[0, 0] += 1 result[1, 1] += 1 # @jit_device #",
"1] field_sample[0, 2] = math.tau*time_step_integration*field_sample[0, 2] if dimension > 2: field_sample[0, 3] =",
"result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based on its analytic form.",
"0]) result[1, 1] = conj(operator[1, 1]) result[2, 1] = conj(operator[1, 2]) result[0, 2]",
"was evaluated at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index) The evaluated",
"spin half systems. Will not work with spin one systems. Assumes the exponent",
"the simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum",
"else: ep = 1 a = a/precision Ca = math.cos(a/2) Sa = -1j*math.sin(a/2)",
"2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1, 2]) if dimension > 2: field_sample[2, 3]",
"fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample,",
"time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index] else: state[time_index, x_index] += state_init[x_index]",
"dtype = np.complex128) elif device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype",
"1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator, result):",
"time_evolution_fine, trotter_cutoff) # Premultiply to the exitsing time evolution operator set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine,",
"interpreted code for the integrator, ie, don't compile the integrator. \"\"\" CPU_SINGLE =",
"terms of the number of threads (workitems) they each contain, when running on",
"will be targeted for integration. That is, whether the integrator is compiled for",
"spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch vector)",
"1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy # result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2",
"sample_index_end), dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index",
"# temporary = np.empty((2, 2), dtype = np.complex128) # elif device_index == 1:",
"+ operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1] result[1,",
"+ ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of",
"# Run calculation for each coarse timestep in parallel time_index = cuda.grid(1) if",
"0: ep = (field_sample[0] + 1j*field_sample[1])/a else: ep = 1 a = a/precision",
"\\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes the exponent is an",
"spin = np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif device == Device.CUDA: spin =",
"state of the spin system, written in terms of the eigenstates of the",
"@jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device def",
"if time_index > 0: for z_index in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index -",
"* **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the",
"# y = field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision) # cx = math.cos(x) #",
"- The matrix to copy from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"= \"midpoint_sample\" \"\"\" Euler integration method. \"\"\" HALF_STEP = \"half_step\" \"\"\" Integration method",
"absolute value of z. norm2(z) : :obj:`callable` The 2 norm of a complex",
"* **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the result",
"Schroedinger equation and returns the quantum state of the spin system over time.",
"+ w1*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w0*field_sample[0, 3] +",
"results = Results(time, time_evolution_coarse, state, self.spin_calculator) return results @staticmethod @nb.njit def get_state(state_init, state,",
"if the `spin` property is needed. Compiled for chosen device on object constrution.",
"slow recompilation. For example, if the `sweep_parameter` is used to define the bias",
"each time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index,",
"&= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin one systems**",
"sampled. Units of :math:`\\\\hbar`. This is an output, so use an empty :class:`numpy.ndarray`",
"sweep_parameter, field_sample[0, :]) time_sample = time_fine + time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample)",
"= 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if hyper_cube_amount <",
"super().__init__() self._value_ = value self.index = index if value == \"python\": def jit_host(template,",
"= roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group",
"+ operator[1, 2]*operator[2, 0] result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1,",
"jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cuda\": def",
"norm2 = utilities.norm2 inner = utilities.inner set_to = utilities.set_to set_to_one = utilities.set_to_one set_to_zero",
"result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] result[1,",
"python features. \"\"\" ROC = (\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()` LLVM compiler",
"2] += 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount =",
"the lab frame, for each time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin :",
"\\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i",
"= time.copy_to_host() elif self.device == Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max,",
"rotating_wave_winding): time_sample = time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample +=",
"used for archiving. \"\"\" def __init__(self, value, dimension, label): super().__init__() self._value_ = value",
"projection operator in the z direction. Returns ------- results : :obj:`Results` An object",
"= operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2,",
"+ (2 + operator[1, 1])*operator[1, 0] + operator[1, 2]*operator[2, 0] result[2, 0] =",
"\\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2})",
"jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device",
"GPU, in parallel. .. note :: To use this device option, the user",
"\\\\\\\\ i & 0 & -i \\\\\\\\ 0 & i & 0 \\\\end{pmatrix},\\\\\\\\",
"resultant adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based",
"1] result[0, 2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0,",
"time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :], float64[:, :])\",",
"The vector to right multiply in the inner product. Returns * **d** (:class:`numpy.complex128`)",
"device_index == 0: # temporary = np.empty((2, 2), dtype = np.complex128) # elif",
"units of s. * **simulation_index** (:obj:`int`) - a parameter that can be swept",
": :obj:`str` A text label that can be used for archiving. index :",
"\\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`) - The complex number to take the absolute",
"each specific GPU model. This means that if more registers are allocated than",
"result[1, 0] = Sa*ep result[0, 1] = Sa/ep result[1, 1] = Ca*ez -",
"float64, float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding):",
"GPU models. device : :obj:`Device` The option to select which device will be",
"float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave,",
"error: numba.roc could not jit get_field function into a roc device function.\\033[0m\\n\") raise",
"construction of the object. Attributes ---------- conj(z) : :obj:`callable` Conjugate of a complex",
"= left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2] @jit_device",
":obj:`True`, the integrator moves into a frame rotating in the z axis by",
"function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device == Device.ROC: time",
"+ \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}}",
"time_evolution_coarse, sweep_parameter): # Declare variables if device_index == 0: time_evolution_fine = np.empty((dimension, dimension),",
"the spin quantum number. dimension : :obj:`int` Dimension of the hilbert space the",
"result): result[0, 0] = (2 + operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0]",
"= Device.CUDA else: device = Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number self.device",
"self.get_state(state_init, state, time_evolution_coarse) results = Results(time, time_evolution_coarse, state, self.spin_calculator) return results @staticmethod @nb.njit",
"when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details. use_rotating_frame :",
"-i & 0 \\\\\\\\ i & 0 & -i \\\\\\\\ 0 & i",
"1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2 # result[1, 1] = cisz*cx*cy",
"+ field_sample[1]*field_sample[1]) if a > 0: ep = (field_sample[0] + 1j*field_sample[1])/a else: ep",
"they each contain, when running on the GPU target devices :obj:`Device.CUDA` (:obj:`Device.ROC`). Defaults",
"an Nvidia cuda compatible GPU, in parallel. .. note :: To use this",
"example), and the fourth entry being the amplitude of the quadratic shift (only",
"+ operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0] result[1,",
"model a magnetic field, for example), and the fourth entry being the amplitude",
"def __init__(self, value, dimension, label): super().__init__() self._value_ = value self.dimension = dimension self.label",
"(a + ib)^* &= a - ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters:",
"available for the GPU model, the GPU must run fewer threads concurrently than",
"threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif device == Device.ROC: spin =",
"dimension = spin_quantum_number.dimension lie_dimension = dimension + 1 # utility_set = spin_quantum_number.utility_set if",
"of one matrix into another. .. math:: (A)_{i, j} = (B)_{i, j} Parameters:",
"(2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2]",
"field_sample[3]/(6*precision) # eq = 1 + 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1",
"print(f\"\\033[33mspinsim warning: time_step_output not an integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\")",
"Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER exponentiation_method_index = 1 @jit_device_template(\"(float64[:], complex128[:, :], complex128[:, :])\")",
"copy to. set_to_one(operator) : :obj:`callable` Make a matrix the multiplicative identity, ie, :math:`1`.",
"+ operator[2, 2])*operator[2, 0] result[0, 1] = (2 + operator[0, 0])*operator[0, 1] +",
"28, threads_per_block = 64, max_registers = 63): \"\"\" Compiles the integrator and spin",
"\\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) -",
"get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64)\")",
"= operator[1, 0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1]",
"being x, y, z spatial directions (to model a magnetic field, for example),",
"0])*operator[0, 0] + operator[0, 1]*operator[1, 0] + operator[0, 2]*operator[2, 0] result[1, 0] =",
"recursively squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`,",
"used here rather than the physics definition, so the left vector is conjugated.",
"interpreted. \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index = index",
"rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index,",
"result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] + left[1, 2]*right[2,",
"for more information and links. spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether",
"self.jit_host = jit_host def jit_device(func): return func self.jit_device = jit_device def jit_device_template(template): def",
"inner product of two orthogonal vectors is 0. .. math:: \\\\begin{align*} l \\\\cdot",
"device option, the user defined field function must be :func:`numba.cuda.jit()` compilable. See `Supported",
"integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. device :",
"+ z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device def cross(left,",
"rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6 w1 = (1.5 - sqrt3)/6 field_sample[2, 0]",
"label : :obj:`str` A text label that can be used for archiving. \"\"\"",
"* **spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch",
"== 2: rotating_wave /= 2 # For every fine step for time_fine_index in",
"\"\"\" Options for the spin quantum number of a system. Parameters ---------- value",
":obj:`callable` Multiply matrices left and right together, to be returned in result. ..",
"adjoint of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array to",
"operator[2, 2] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0] = left[0,",
"The matrix to set to :math:`1`. set_to_zero(operator) : :obj:`callable` Make a matrix the",
"None, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64,",
"of. Returns * **nz** (:class:`numpy.float64`) - The 2 norm of z. inner(left, right)",
"+ left[1, 1]*right[1, 1] + left[1, 2]*right[2, 1] result[2, 1] = left[2, 0]*right[0,",
"c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X",
"0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64,",
"conjugated. Thus the inner product of two orthogonal vectors is 0. .. math::",
": :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse grained list of time samples that",
"J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2})",
"result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2,",
"{}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to",
"result) : :obj:`callable` Copy the contents of one matrix into another. .. math::",
"= np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = cuda.local.array(sample_index_end, dtype",
"&= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0 \\\\\\\\ 1 & 0 & 1",
"run concurrently, at the expense of fewer resgiters being avaliable to each thread,",
"sample_index_end = 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end = 1",
"z.imag**2) if spin_quantum_number == SpinQuantumNumber.HALF: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 +",
"within the integrator. Parameters ---------- value : :obj:`str` A text label that can",
"= norm2 self.inner = inner self.set_to = set_to self.set_to_one = set_to_one self.set_to_zero =",
"direction. This removes the (possibly large) z component of the field, which increases",
"time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) +",
"stepwise time evolution operators in succession to find the quantum state timeseries of",
"# result[2, 0] = 0.5*cisz*(cx - cy - 1j*sx*sy) # cisz = math.cos(2*q/3)",
"For spin one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\",
"cy + 1j*sx*sy) # if device_index == 0: # temporary = np.empty((3, 3),",
"time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128) elif device_index == 1: time_evolution_old =",
"- \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray` of",
"= 0 operator[2, 2] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0]",
"|\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The",
"time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64) field_sample =",
"> 1e-6: print(f\"\\033[33mspinsim warning: time_step_output not an integer multiple of time_step_integration. Resetting time_step_integration",
"z component of the field, which increases the accuracy of the output since",
"the system it is being run on is Nvidia Cuda compatible, and defaults",
"left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2] result[1, 2] = left[1, 0]*right[0, 2]",
"for x_index in nb.prange(state.shape[1]): state[time_index, x_index] = 0 if time_index > 0: for",
"temporary, result) else: @jit_device def norm2(z): return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 +",
"one extra register per thread is always added to the number specified for",
"1: temporary = cuda.local.array((2, 2), dtype = np.complex128) elif device_index == 2: temporary_group",
"of the spin quantum number. dimension : :obj:`int` Dimension of the hilbert space",
"use of a rotating frame is commonly associated with the use of a",
"time_index = roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse,",
"+= state_init[x_index] sqrt2 = math.sqrt(2) sqrt3 = math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities:",
"(0) or end time (1)) The time values for when the experiment is",
"q J_q), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1",
"spatial_direction)) - The expected spin projection (Bloch vector) over time. \"\"\" def __init__(self,",
"Used to calculate `spin` the first time it is referenced by the user.",
"2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *=",
"j} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to",
"+ ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: *",
"information. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector) over time",
"with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64`",
"in the z direction. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch",
"by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be",
"\"\"\" ROC = (\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()` LLVM compiler to compile",
"= 0 result[0, 1] = 0 result[1, 1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample,",
"= Device.CPU self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number self.device = device self.get_time_evolution_raw =",
"1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 +",
"state of the spin system over time. Parameters ---------- sweep_parameter : :obj:`float` The",
":obj:`callable` is passed to the :obj:`Results` object returned from :func:`Simulator.evaluate()`, and is executed",
"= conj(operator[1, 2]) result[0, 2] = conj(operator[2, 0]) result[1, 2] = conj(operator[2, 1])",
"def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1]",
"parallel = True)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device",
"rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] = X.imag field_sample[2]",
"for some information. \"\"\" for time_index in range(state.shape[0]): # State = time evolution",
"= time_coarse[time_index] # Initialise time evolution operator to 1 set_to_one(time_evolution_coarse[time_index, :]) field_sample[0, 2]",
"trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba could not jit get_field function into",
"2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return",
"compiler to compile the integrator to run on an Nvidia cuda compatible GPU,",
"+ z[2].imag**2) @jit_device def cross(left, right, result): result[0] = conj(left[1]*right[2] - left[2]*right[1]) result[1]",
"time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine)",
"& 0 \\\\\\\\ 1 & 0 & 1 \\\\\\\\ 0 & 1 &",
"state, self.spin_calculator) return results @staticmethod @nb.njit def get_state(state_init, state, time_evolution): \"\"\" Use the",
"= value self.index = index ANALYTIC = (\"analytic\", 0) \"\"\" Analytic expression of",
"functions of the simulator. Parameters ---------- get_field : :obj:`callable` A python function that",
"time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample,",
"ep = 1 a = a/precision Ca = math.cos(a/2) Sa = -1j*math.sin(a/2) ez",
"Python features`_ for compilable python features. \"\"\" ROC = (\"roc\", 2) \"\"\" Use",
"roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate",
"use for matrix exponentiation within the integrator. Parameters ---------- value : :obj:`str` A",
"1j*math.sin(z + q/3) # result[0, 0] = 0.5*cisz*(cx + cy - 1j*sx*sy) #",
"0 @jit_device def matrix_multiply(left, right, result): result[0, 0] = left[0, 0]*right[0, 0] +",
"== 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1),",
"at. time_evolution : :obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index) The evaluated time evolution",
"# temporary = cuda.local.array((3, 3), dtype = np.complex128) # elif device_index == 2:",
"(:class:`numpy.complex128`) - The conjugate of z. complex_abs(z) : :obj:`callable` The absolute value of",
"to :obj:`Device.CUDA` if the system it is being run on is Nvidia Cuda",
"= np.empty((2, 2), dtype = np.complex128) # elif device_index == 1: # temporary",
"1])*operator[1, 1] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0]) result[1, 0]",
"- iz J_z)\\\\\\\\ &= \\\\begin{pmatrix} \\\\cos(\\\\frac{r}{2}) - i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y",
"for control, so really this number is 64). Raising this value allocates more",
"of :class:`numpy.complex128`, (y_index, x_index)) - An array to write the resultant adjoint to.",
"temporary = np.empty((2, 2), dtype = np.complex128) elif device_index == 1: temporary =",
"definitions of all of the device functions (functions compiled for use on the",
"= conj(left[1]*right[2] - left[2]*right[1]) result[1] = conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1] -",
"rotating_wave, rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6 w1 = (1.5 - sqrt3)/6 field_sample[2,",
"ep = (field_sample[0] + 1j*field_sample[1])/a else: ep = 1 a = a/precision Ca",
"state[time_index, 1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index,",
"not device: if cuda.is_available(): device = Device.CUDA else: device = Device.CPU self.threads_per_block =",
"1] + operator[1, 2]*operator[2, 1] result[2, 1] = operator[2, 0]*operator[0, 1] + operator[2,",
"1j*sx*sy)/cisz # result[1, 0] = (cx*sy -1j*sx*cy)/cisz # result[0, 1] = -(cx*sy +",
": :obj:`float` The input to the `get_field` function supplied by the user. Modifies",
"0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2",
"\"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation using the Lie Trotter theorem. \"\"\"",
"SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method",
"complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave,",
"= (1.5 + sqrt3)/6 w1 = (1.5 - sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0,",
"out of rotating frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /=",
"__init__(self, value, index): super().__init__() self._value_ = value self.index = index ANALYTIC = (\"analytic\",",
"http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index): super().__init__()",
"the output state in given out of the rotating frame. One can, of",
"cuda.local.array(sample_index_end, dtype = np.complex128) elif device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension),",
"(Bloch vector) over time. This is calculated just in time using the JITed",
"time_step_output not an integer multiple of time_step_integration. Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration =",
"the amplitude of the quadratic shift (only appearing, and required, in spin one",
"absolute value of a complex number. .. math:: \\\\begin{align*} |a + ib| &=",
"ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r =",
"calculated, it is then recursively squared :math:`\\\\tau` times to obtain :math:`\\\\exp(A)`. Parameters: *",
"function supplied by the user. Modifies the field function so the integrator can",
"+ operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1] result[1, 1] = operator[1, 0]*operator[0,",
"conj(operator[2, 2]) @jit_device def matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff):",
"- The expected spin projection (Bloch vector) over time. \"\"\" def __init__(self, get_field,",
"this value could increase performance for your GPU. See `Achieved Occupancy`_ for Nvidia's",
"to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. get_time_evolution_raw :",
":]) field_sample[0, 2] = 0 if use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample,",
"to run on an AMD ROCm compatible GPU, in parallel. .. warning ::",
":mod:`spinsim`, balancing more threads vs faster running threads, and changing this value could",
"used in the integrator. These device functions are compiled for the chosen target",
"math.cos(ez) + 1j*math.sin(ez) # eq = field_sample[3]/(6*precision) # eq = math.cos(eq) + 1j*math.sin(eq)",
"\\\\begin{align*} |a + ib| &= \\\\sqrt{a^2 + b^2}\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*}",
"not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF:",
"math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device def",
"nb.njit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device = jit_device",
"one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) -",
"1: time_index = roc.get_global_id(1) if time_index < spin.shape[0]: if dimension == 2: spin[time_index,",
"to finish at. Measured in s. * **time_step_integration** (:obj:`float`) - The integration time",
"archiving. index : :obj:`int` A reference number, used when compiling the integrator, where",
"= 1 # Sa = a/2 # ca = 1 # sa =",
"all CPU cores, in parallel. .. note :: To use this device option,",
"a magnetic field, for example), and the fourth entry being the amplitude of",
"0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0] result[2, 0] = left[2,",
"the returned value of the field. This is a four dimensional vector, with",
"result[1, 1] = conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result): x = field_sample[0] y",
"time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3)) - time_coarse)",
":math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator to",
"= 0 operator[1, 1] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0",
"\\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix",
"self.state = state self.spin_calculator = spin_calculator def __getattr__(self, attr_name): if attr_name == \"spin\":",
"specific GPU model. This means that if more registers are allocated than are",
"= np.complex128) # elif device_index == 1: # temporary = cuda.local.array((3, 3), dtype",
"(to model a magnetic field, for example), and the fourth entry being the",
"False, max_registers = max_registers)(func) return jit_host self.jit_host = jit_host def jit_device(func): return cuda.jit(device",
"= operator[2, 1] result[0, 2] = operator[0, 2] result[1, 2] = operator[1, 2]",
"0]*operator[0, 0] + (2 + operator[1, 1])*operator[1, 0] result[0, 1] = (2 +",
"the expense of fewer resgiters being avaliable to each thread, meaning slower memory",
"= \\\\sum_j (L)_{i,j} (R)_{j,k} \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:],",
"- The 2 norm of z. inner(left, right) : :obj:`callable` The inner (maths",
"field_sample[2]/(2*precision) # cx = math.cos(x) # sx = math.sin(x) # cy = math.cos(y)",
"have three arguments: * **time_sample** (:obj:`float`) - the time to sample the field",
"- iy J_y - iz J_z)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y -",
"above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix which the",
":obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int` The size of each thread block (workgroup),",
"True)(func) return jit_device_template self.jit_device_template = jit_device_template PYTHON = (\"python\", 0) \"\"\" Use pure",
"time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could not jit get_field function into a cuda",
"archiving. \"\"\" def __init__(self, value, dimension, label): super().__init__() self._value_ = value self.dimension =",
"{\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The",
"- iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z",
"2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2]",
"return jit_host self.jit_host = jit_host def jit_device(func): return func self.jit_device = jit_device def",
":obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse grained list of time",
"1j*math.sin(eq) result[0, 0] = Ca/ez - 1 result[1, 0] = Sa*ep result[0, 1]",
"features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html",
"start at. Measured in s. time_end : :obj:`float` The time that the experiment",
"- 1 result[1, 0] = Sa*ep result[0, 1] = Sa/ep result[1, 1] =",
"z. inner(left, right) : :obj:`callable` The inner (maths convention dot) product between two",
"Commutator free, fourth order Magnus based integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler",
"See :obj:`ExponentiationMethod` for more details. use_rotating_frame : :obj:`bool` Whether or not to use",
"self.threads_per_block = threads_per_block self.spin_quantum_number = spin_quantum_number self.device = device self.get_time_evolution_raw = None self.get_spin_raw",
"multiplicative identity, ie, :math:`1`. .. math:: \\\\begin{align*} (A)_{i, j} &= \\\\delta_{i, j}\\\\\\\\ &=",
"on an Nvidia cuda compatible GPU, in parallel. .. note :: To use",
"+= time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3))",
"operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] result[1, 0] = operator[1, 0]*operator[0, 0]",
"range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index - 1, z_index] else:",
"that the experiment is to finish at. Measured in s. The duration of",
"time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64,",
"w1*field_sample[1, 0]) field_sample[2, 1] = math.tau*time_step_integration*(w0*field_sample[0, 1] + w1*field_sample[1, 1]) field_sample[2, 2] =",
"l \\\\cdot r &= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of",
"conjugate of z. complex_abs(z) : :obj:`callable` The absolute value of a complex number.",
"jit_host(func): return func return jit_host self.jit_host = jit_host def jit_device(func): return func self.jit_device",
"= math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4",
"done when this option is set to :obj:`True` - no such approximations are",
"projection (Bloch vector) over time for a given time series of a quantum",
"2] = 0 if use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0,",
"temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result)",
"\\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes the exponent is an imaginary linear",
"output state in given out of the rotating frame. One can, of course,",
"Use the :func:`numba.jit()` LLVM compiler to compile the integrator to run on all",
"left[2, 2]*right[2, 1] result[0, 2] = left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2]",
"0.5*cisz*(cx - cy - 1j*sx*sy) # cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0,",
"-1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) # eq = field_sample[3]/(6*precision)",
"class Results: \"\"\" The results of a an evaluation of the integrator. Attributes",
":obj:`callable` Conjugate of a complex number. .. math:: \\\\begin{align*} (a + ib)^* &=",
"z (and q for spin one) respectively, as described above. * **result** (:class:`numpy.ndarray`",
"+ c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*}",
"dtype = np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result,",
"+ left[0, 1]*right[1, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1,",
"get_field function into a roc device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time =",
"self.jit_device_template = jit_device_template PYTHON = (\"python\", 0) \"\"\" Use pure python interpreted code",
"> 0: for z_index in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index - 1, x_index,",
"time_step_output, state_init): \"\"\" Integrates the time dependent Schroedinger equation and returns the quantum",
"1,&i = j\\\\\\\\ 0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"Modifying might be able to increase execution time for different GPU models. \"\"\"",
"field. This is a four dimensional vector, with the first three entries being",
"be :func:`numba.cuda.jit()` compilable. See `Supported CUDA Python features`_ for compilable python features. \"\"\"",
"vector) over time. \"\"\" def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method =",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from. * **result**",
"== 1: temporary = cuda.local.array((3, 3), dtype = np.complex128) elif device_index == 2:",
"integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter,",
"s. The duration of the experiment is `time_end - time_start`. time_step_integration : :obj:`float`",
"# hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/precision #",
"= jit_device def jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device = True, inline =",
"@jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[0, 1]",
"The evaluated quantum state of the spin system over time, written in terms",
"experiment is to start at. Measured in s. time_end : :obj:`float` The time",
":obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number`",
"called {}.\".format(self, attr_name)) class Simulator: \"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option",
"in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0] += 1 result[1, 1] +=",
"math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:,",
"a complex number. .. math:: \\\\begin{align*} (a + ib)^* &= a - ib\\\\\\\\",
"for the integrator, ie, don't compile the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0)",
"result): result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[0, 1]",
"the exponent is an imaginary linear combination of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*}",
"ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"and finishes. In units of s. time_step_integration : :obj:`float` The time step used",
"get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration -",
"rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index] #",
"& \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}",
"utilities.complex_abs norm2 = utilities.norm2 inner = utilities.inner set_to = utilities.set_to set_to_one = utilities.set_to_one",
"when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is",
"result) : :obj:`callable` Calculates a matrix exponential based on the Lie Product Formula,",
"\\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0",
"result[1, 0] = left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] result[0, 1] =",
"utility_set = spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER",
"---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state` was evaluated",
"np.complex128) elif device_index == 1: time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128) elif",
"The matrix to copy from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"0 & 1 \\\\\\\\ 0 & 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix}",
"evolution operator between each time step. See :ref:`architecture` for some information. state :",
"transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6",
"Ca*ez - 1 if device_index == 0: temporary = np.empty((2, 2), dtype =",
"that can be used for archiving. index : :obj:`int` A reference number, used",
"time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.roc could not jit get_field",
"result[2, 2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 +",
"operator[2, 2] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0]",
"empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index",
"\\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*}",
"between each element of `time_coarse`. In units of s. Determines the sample rate",
"and the time that the experiment is to finish at. Measured in s.",
"+ left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0] result[2, 0] = left[2, 0]*right[0,",
"= (cx*cy + 1j*sx*sy)*cisz # if device_index == 0: # temporary = np.empty((2,",
":obj:`Device` The option to select which device will be targeted for integration. That",
"+ \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}} & e^{i\\\\frac{2Q}{3}} c_X c_Y & \\\\frac{e^{-i(Z",
"features, and `Supported Numpy features`_ for compilable numpy features. \"\"\" CPU = (\"cpu\",",
"\\\\end{align*} Matrix can be in :math:`\\\\mathbb{C}^{2\\\\times2}` or :math:`\\\\mathbb{C}^{3\\\\times3}`. Parameters: * **operator** (:class:`numpy.ndarray` of",
"of spin system dynamics. This is not done when this option is set",
"a cuda device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time = time.copy_to_host() elif self.device",
":math:`\\\\tau`, .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz",
"&= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & 1 \\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix}",
"result[1, 0] = (y - 1j*x)*s result[0, 1] = -(y + 1j*x)*s result[1,",
"A on object that contains definitions of all of the device functions (functions",
"cisz = math.cos(z + q/3) - 1j*math.sin(z + q/3) # result[0, 0] =",
"= math.ceil(trotter_cutoff/2) if hyper_cube_amount < 0: hyper_cube_amount = 0 precision = 4**hyper_cube_amount a",
"IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end = 1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method ==",
"@jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0,",
"1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] @jit_device def",
"output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using",
"r z /= r c = math.cos(r/2) s = math.sin(r/2) result[0, 0] =",
"z direction. spin_calculator : :obj:`callable` Calculates the expected spin projection (Bloch vector) over",
"float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave,",
"with spin half systems. Will not work with spin one systems. Assumes the",
"1]*right[1, 1] @jit_device def matrix_square_residual(operator, result): result[0, 0] = (2 + operator[0, 0])*operator[0,",
"field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine,",
"dtype = np.complex128) # elif device_index == 1: # temporary = cuda.local.array((3, 3),",
"x_index)) - The matrix which the result of the exponentiation is to be",
"& 0 & 0 \\\\\\\\ 0 & -2 & 0 \\\\\\\\ 0 &",
"1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0] - state[time_index,",
"c + 1j*z*s else: result[0, 0] = 1 result[1, 0] = 0 result[0,",
"of :class:`numpy.complex128`, (index)) - The vector to left multiply in the inner product.",
"2] result[2, 2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2,",
"1]*right[1, 0] + left[0, 2]*right[2, 0] result[1, 0] = left[1, 0]*right[0, 0] +",
"the rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator",
"of `time_coarse`. In units of s. Determines the sample rate of the outputs",
"some information. \"\"\" for time_index in range(state.shape[0]): # State = time evolution *",
"&= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the",
"is to be written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a matrix exponential",
"This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare",
"**state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin",
"append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\")",
"np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index] = time_end_points[0] + time_step_output*time_index time_fine = time_coarse[time_index]",
"for integration. That is, whether the integrator is compiled for a CPU or",
"roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block - 1)) // self.threads_per_block",
"= (2*conj(state[time_index, 1])*(state[time_index, 0] + state[time_index, 2])/sqrt2).real spin[time_index, 1] = (2j*conj(state[time_index, 1])*(state[time_index, 0]",
"= math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0, 2] + w0*field_sample[1,",
"32, threads_per_block = 64, max_registers = 63): \"\"\" .. _Achieved Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters",
"is found for. In units of s. This is an output, so use",
"the first time it is referenced by the user. Parameters: * **state** (:obj:`numpy.ndarray`",
"start of the simulation. state : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state",
"np.complex128) # elif device_index == 1: # temporary = cuda.local.array((3, 3), dtype =",
"= left[0, 0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2] result[1,",
"nb.prange(spin.shape[0]): if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1]",
"= 1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF):",
"of fewer resgiters being avaliable to each thread, meaning slower memory must be",
"= operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2,",
"\"\"\" Parameters ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state`",
"model, the GPU must run fewer threads concurrently than it has Cuda cores,",
"result[1, 1] = operator[1, 0]*operator[0, 1] + (2 + operator[1, 1])*operator[1, 1] @jit_device",
"able to increase execution time for different GPU models. device : :obj:`Device` The",
":], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2",
"elif value == \"cpu\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template, parallel =",
"= 1 operator[2, 1] = 0 operator[0, 2] = 0 operator[1, 2] =",
"# Run calculation for each coarse timestep in parallel time_index = roc.get_global_id(1) if",
"= np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on object that contains definitions of all",
"defined field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable",
"varying `sweep_parameter`. time_start : :obj:`float` The time offset that the experiment is to",
"spin one) respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"cy = math.cos(y) # sy = math.sin(y) # cisz = math.cos(z) + 1j*math.sin(z)",
"each coarse timestep in parallel time_index = roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index,",
"\\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix",
"value allocates more registers (fast memory) to each thread, out of a maximum",
"time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine, time_evolution_coarse[time_index, :], field_sample, time_step_integration, rotating_wave, rotating_wave_winding) time_fine +=",
"vector. .. math:: \\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters:",
":class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of",
"the experiment is `time_end - time_start`. time_step_integration : :obj:`float` The integration time step.",
"Thus, there will be an optimal value of `max_registers` for each model of",
"value : :obj:`str` A text label that can be used for archiving. \"\"\"",
"GPU is said to have less occupancy. Lowering the value increases GPU occupancy,",
"+ time_step_output*time_index time_fine = time_coarse[time_index] # Initialise time evolution operator to 1 set_to_one(time_evolution_coarse[time_index,",
"+ y J_y + z J_z + q J_q), \\\\end{align*} with .. math::",
":, :], float64)\") def get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare",
":class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index,",
"of the eigenstates of the spin projection operator in the z direction. Returns:",
"= spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator = spin_calculator def evaluate(self, sweep_parameter,",
"dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128) elif device_index == 1:",
"result[2, 1] = sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2,",
"one systems). .. note:: This function must be compilable for the device that",
"= math.tau*time_step_integration*field_sample[0, 2] if dimension > 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0,",
"Work in progress, not currently functional! \"\"\" class Results: \"\"\" The results of",
"class ExponentiationMethod(Enum): \"\"\" The implementation to use for matrix exponentiation within the integrator.",
"1] = cisz*(sy - 1j*cy*sx)/sqrt2 # cisz = math.cos(z - q/3) + 1j*math.sin(z",
"& -i \\\\\\\\ i & 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0",
"= -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1, 1] = ca*(eq*eq*eq*eq) - 1 result[2,",
":obj:`True` - no such approximations are made, and the output state in given",
"`time_step_integration`. Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) -",
"times to obtain :math:`\\\\exp(A)`. Parameters: * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64`, (y_index, x_index)) -",
": :obj:`callable` Copy the contents of one matrix into another. .. math:: (A)_{i,",
"http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\"",
"of the experiment is `time_end - time_start`. time_step_integration : :obj:`float` The integration time",
"(self.threads_per_block - 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)",
"& (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*}",
"+ operator[0, 0])*operator[0, 0] + operator[0, 1]*operator[1, 0] result[1, 0] = operator[1, 0]*operator[0,",
"models. max_registers : :obj:`int` The maximum number of registers allocated per thread when",
"step. See :ref:`architecture` for some information. \"\"\" for time_index in range(state.shape[0]): # State",
"cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) blocks_per_grid = (time.size + (self.threads_per_block",
"(functions compiled for use on the target device) used in the integrator. These",
"to increase execution time for different GPU models. max_registers : :obj:`int` The maximum",
"device == Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block",
"on object constrution. Parameters: * **sweep_parameter** (:obj:`float`) - The input to the `get_field`",
"next timesteps, for each time sampled. See :math:`U(t)` in :ref:`overview_of_simulation_method`. This is an",
"J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix}",
"CUDA Python features: http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value",
"label that can be used for archiving. index : :obj:`int` A reference number,",
"if hyper_cube_amount < 0: # hyper_cube_amount = 0 # precision = 4**hyper_cube_amount #",
"models. \"\"\" jit_device = device.jit_device device_index = device.index @jit_device def conj(z): return (z.real",
":], float64, float64, complex128[:])\") def append_exponentiation_integration_midpoint(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :],",
"elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end = 4 elif integration_method ==",
"time evolution operator between each time step. See :ref:`architecture` for some information. \"\"\"",
"1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:,",
"= 63): \"\"\" Compiles the integrator and spin calculation functions of the simulator.",
"0] result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0] + (2",
"time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = time_fine + time_step_integration - time_coarse rotating_wave_winding[1]",
"= (time.size + (self.threads_per_block - 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points,",
"set_to_one(operator) : :obj:`callable` Make a matrix the multiplicative identity, ie, :math:`1`. .. math::",
"on average take smaller steps. .. note :: The use of a rotating",
"defaults to :obj:`ExponentiationMethod.ANALYTIC` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more",
"&= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & -2 & 0",
"time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :],",
"\"\"\" def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame =",
"@jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] = 0 operator[2, 0]",
"---------- value : :obj:`float` The numerical value of the spin quantum number. dimension",
"spin system over time, written in terms of the eigenstates of the spin",
"sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse grained list",
"operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2]",
"= -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) eq = field_sample[3]/(6*precision)",
"self.label = label HALF = (1/2, 2, \"half\") \"\"\" For two level systems.",
"for power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) self.conj",
"device functions are compiled for the chosen target device on construction of the",
"spin belong to. label : :obj:`str` A text label that can be used",
"rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame: # Take out of rotating frame rotating_wave_winding[0]",
"transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample, rotating_wave, rotating_wave_winding): X =",
"max_registers): def jit_host(func): return nb.njit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return",
"vectors. .. note:: The mathematics definition is used here rather than the physics",
"\\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & -i & 0 \\\\\\\\ i & 0 & -i \\\\\\\\",
"the spin projection operator in the z direction. Returns: * **spin** (:obj:`numpy.ndarray` of",
"means that if more registers are allocated than are available for the GPU",
"**d** (:class:`numpy.complex128`) - The inner product of l and r. set_to(operator, result) :",
"= 4 elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end = 1 exponentiation_method_index",
"The quantum state of the spin system over time, written in terms of",
"self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device = True, inline",
"return func self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return func return jit_device_template",
"use_rotating_frame : :obj:`bool` Whether or not to use the rotating frame optimisation. Defaults",
"@jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration,",
"True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return roc.jit(template, device = True)(func)",
"time. This is calculated just in time using the JITed :obj:`callable` `spin_calculator`. spin_calculator",
"direction. spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch",
": :obj:`ExponentiationMethod` Which method to use for matrix exponentiation in the integration algorithm.",
"spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if integration_method == IntegrationMethod.MAGNUS_CF4: sample_index_max = 3",
"elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128)",
"result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] result[1, 1] =",
"np.complex128) elif device_index == 1: temporary = cuda.local.array((2, 2), dtype = np.complex128) elif",
"Occupancy: https://docs.nvidia.com/gameworks/content/developertools/desktop/analysis/report/cudaexperiments/kernellevel/achievedoccupancy.htm Parameters ---------- get_field : :obj:`callable` A python function that describes the",
"== Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block -",
"dimension), dtype = np.complex128) field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding =",
"spin system in the lab frame, for each time sampled. See :math:`\\\\psi(t)` in",
"1]*operator[1, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1,",
"time_step_integration, rotating_wave, rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame: # Take out of rotating",
"cy - 1j*sx*sy) # cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1] =",
"z[0].imag**2 + z[1].real**2 + z[1].imag**2) @jit_device def inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1]",
"= time_fine + time_step_integration - time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample +=",
"elif device_index == 1: temporary = cuda.local.array((3, 3), dtype = np.complex128) elif device_index",
"on an AMD ROCm compatible GPU, in parallel. .. warning :: Work in",
"np.empty((2, 2), dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((2, 2),",
"1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2:",
"# cisz = math.cos(2*q/3) + 1j*math.sin(2*q/3) # result[0, 1] = cisz*(-sy - 1j*cy*sx)/sqrt2",
"note:: The mathematics definition is used here rather than the physics definition, so",
"operator[0, 2] = 0 operator[1, 2] = 0 operator[2, 2] = 1 @jit_device",
"2] = 0.5*cisz*(cx - cy + 1j*sx*sy) # result[1, 2] = cisz*(-1j*sx -",
"output since the integrator will on average take smaller steps. .. note ::",
"2]*operator[2, 0] result[1, 0] = operator[1, 0]*operator[0, 0] + (2 + operator[1, 1])*operator[1,",
"a subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x +",
"np.empty((3, 3), dtype = np.complex128) elif device_index == 1: temporary = cuda.local.array((3, 3),",
"set_to_zero(operator) : :obj:`callable` Make a matrix the additive identity, ie, :math:`0`. .. math::",
"1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i &",
"device on object constrution. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) -",
"the spin quantum number of a system. Parameters ---------- value : :obj:`float` The",
"for each model of GPU running :mod:`spinsim`, balancing more threads vs faster running",
"This removes the (possibly large) z component of the field, which increases the",
"return func return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu_single\": def jit_host(template,",
"device.jit_host jit_device = device.jit_device jit_device_template = device.jit_device_template device_index = device.index dimension = spin_quantum_number.dimension",
"Utilities(spin_quantum_number, device, threads_per_block) conj = utilities.conj complex_abs = utilities.complex_abs norm2 = utilities.norm2 inner",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply by. *",
"left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1] result[0, 2] = left[0, 0]*right[0, 2]",
"is set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details. use_rotating_frame : :obj:`bool` Whether",
"if not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number ==",
"device functions (functions compiled for use on the target device) used in the",
"0 & 0 \\\\\\\\ 0 & 0 & 0 \\\\\\\\ 0 & 0",
"**az** (:class:`numpy.float64`) - The absolute value of z. norm2(z) : :obj:`callable` The 2",
"See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block)",
"compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more",
"x, y, z spatial directions (to model a magnetic field, for example), and",
"append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method ==",
"& 1 \\\\\\\\ 0 & 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0",
"all options and more details. exponentiation_method : :obj:`ExponentiationMethod` Which method to use for",
"2] + w0*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3]",
"= operator[1, 1] result[2, 1] = operator[2, 1] result[0, 2] = operator[0, 2]",
"\\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & -2 & 0 \\\\\\\\",
"projection (Bloch vector) over time. \"\"\" if device.index == 0: spin = np.empty((state.shape[0],",
"1] = left[2, 0]*right[0, 1] + left[2, 1]*right[1, 1] + left[2, 2]*right[2, 1]",
"conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0] result[1,",
"(spin wavefunction) of the system at the start of the simulation. state :",
"the integrator and spin calculation functions of the simulator. Parameters ---------- get_field :",
"\"cpu\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template, parallel = True)(func) return jit_host",
"- time_end_points[0])/time_step_output) if self.device.index == 0: time = np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max,",
"spin calculation functions of the simulator. Parameters ---------- get_field : :obj:`callable` A python",
"+ 1j*math.sin(eq) # Ca = 1 # Sa = a/2 # ca =",
"conj(operator[1, 2]) result[0, 2] = conj(operator[2, 0]) result[1, 2] = conj(operator[2, 1]) result[2,",
"(field_sample[0] + 1j*field_sample[1])/a else: ep = 1 a = a/precision Ca = math.cos(a/2)",
"hermitian adjoint of a matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &=",
"time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) return @jit_host(\"(complex128[:, :],",
"integrate states in the rotating frame, using the rating wave approximation: just define",
"compiled for the chosen target device on construction of the object. Attributes ----------",
"as np import numba as nb from numba import cuda from numba import",
"a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a > 0: ep = (field_sample[0] +",
":obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum state of the spin system",
"the field function so the integrator can be used for many experiments, without",
"for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\" \"\"\" Commutator free, fourth order Magnus based",
"for spin one) respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"== 1: time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128) elif device_index == 2:",
"@jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_magnus_cf4(time_evolution_fine, time_evolution_coarse, field_sample,",
":func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output,",
"> 2: field_sample[0, 3] = math.tau*time_step_integration*field_sample[0, 3] append_exponentiation(field_sample[0, :], time_evolution_fine, time_evolution_coarse) get_field_integration =",
"- A matrix to be filled with the result of the product. adjoint(operator)",
"coarse timestep in parallel time_index = cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse,",
"conj(left[2]*right[0] - left[0]*right[2]) result[2] = conj(left[0]*right[1] - left[1]*right[0]) @jit_device def inner(left, right): return",
"vector) over time. This is calculated just in time using the JITed :obj:`callable`",
"The numerical value of the spin quantum number. dimension : :obj:`int` Dimension of",
"of :class:`numpy.complex128` The state (spin wavefunction) of the system at the start of",
"Occupancy`_ for Nvidia's official explanation. \"\"\" utilities = Utilities(spin_quantum_number, device, threads_per_block) conj =",
"_Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features: http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html .. _Supported CUDA Python",
"method outside of spin half. Switching to a Lie Trotter method.\\033[0m\") exponentiation_method =",
"results : :obj:`Results` An object containing the results of the simulation. \"\"\" if",
":class:`numpy.float64`, (y_index, x_index)) - The values of x, y and z (and q",
"`get_field()` with field functions that use the rotating wave approximation in the rotating",
"jit_host def jit_device(func): return cuda.jit(device = True, inline = True)(func) self.jit_device = jit_device",
"float64, complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\"",
"half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int` The size",
"np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128) elif device_index",
"the quantum state timeseries of the 3 level atom. Parameters ---------- state_init :",
"\\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 & 1 & 0 \\\\\\\\ 1 & 0 & 1 \\\\\\\\",
"e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y",
"np.empty((state.shape[0], 3), np.float64) get_spin(state, spin) elif device == Device.CUDA: spin = cuda.device_array((state.shape[0], 3),",
"step used within the integration algorithm. In units of s. time_step_output : :obj:`float`",
"device_index == 0: for time_index in nb.prange(spin.shape[0]): if dimension == 2: spin[time_index, 0]",
"The mathematics definition is used here rather than the physics definition, so the",
"math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[0, 2]/2 if dimension > 2: field_sample[2, 3]",
"is executed there just in time if the `spin` property is needed. Compiled",
"= index ANALYTIC = (\"analytic\", 0) \"\"\" Analytic expression of the matrix exponential.",
":class:`numpy.complex128`, (index)) - The vector to right multiply in the inner product. Returns",
"precision = 4**hyper_cube_amount # x = field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) # z",
"in the rotating frame. integration_method : :obj:`IntegrationMethod` Which integration method to use in",
"returned from :func:`Simulator.evaluate()`, and is executed there just in time if the `spin`",
"0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif",
"quantum state of the spin system over time, written in terms of the",
"Calculate each expected spin value in parallel. For spin half: .. math:: \\\\begin{align*}",
"a maximum number for the whole GPU, for each specific GPU model. This",
"evolution operator between each time step. See :ref:`architecture` for some information. \"\"\" for",
"- q/3) # result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy) # result[1,",
"three level systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options for describing which method is",
"1j*math.sin(z - q/3) # result[0, 2] = 0.5*cisz*(cx - cy + 1j*sx*sy) #",
"cuda.local.array((3, 3), dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 3,",
"\"cuda\": def jit_host(template, max_registers): def jit_host(func): return cuda.jit(template, debug = False, max_registers =",
".. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y - iz J_z)\\\\\\\\",
"details. use_rotating_frame : :obj:`bool` Whether or not to use the rotating frame optimisation.",
"= np.complex128) elif device_index == 2: time_evolution_old_group = roc.shared.array((threads_per_block, dimension, dimension), dtype =",
"projection (Bloch vector) over time. \"\"\" def __init__(self, get_field, spin_quantum_number, device = None,",
"run on all CPU cores, in parallel. .. note :: To use this",
"complex_abs = utilities.complex_abs norm2 = utilities.norm2 inner = utilities.inner set_to = utilities.set_to set_to_one",
"half systems:** Assumes the exponent is an imaginary linear combination of a subspace",
"2: time_evolution_coarse[time_index, 0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2,",
"The number of squares made by the matrix exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen.",
"registers allocated per thread when using :obj:`Device.CUDA` as the target device, and can",
"= operator[1, 2] result[2, 2] = operator[2, 2] @jit_device def set_to_one(operator): operator[0, 0]",
": :class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin wavefunction) of the system at the",
"math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) eq",
"to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix exponential based on its",
"---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the simulator will integrate",
"be targeted for integration. That is, whether the integrator is compiled for a",
"= (2 + operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2,",
"= jit_host def jit_device(func): return roc.jit(device = True)(func) self.jit_device = jit_device def jit_device_template(template):",
"np.asarray([time_start, time_end], np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if",
"matrix exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set",
"= left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1] result[1,",
"Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*}",
"range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result)",
"number. dimension : :obj:`int` Dimension of the hilbert space the states with this",
"time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)): get_field_integration(sweep_parameter, time_fine, time_coarse[time_index], time_step_integration, field_sample, rotating_wave, rotating_wave_winding) append_exponentiation_integration(time_evolution_fine,",
"of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated time evolution operator between each time",
"= utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device = device.jit_device jit_device_template = device.jit_device_template device_index =",
"in s. * **time_step_integration** (:obj:`float`) - The integration time step. Measured in s.",
"method multiple times, each time varying `sweep_parameter`. time_start : :obj:`float` The time offset",
"cisz = math.cos(z - q/3) + 1j*math.sin(z - q/3) # result[0, 2] =",
"math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2",
"(Bloch vector) over time. \"\"\" def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method",
"== SpinQuantumNumber.ONE: exponentiation_method = ExponentiationMethod.LIE_TROTTER elif spin_quantum_number == SpinQuantumNumber.HALF: exponentiation_method = ExponentiationMethod.ANALYTIC if",
"\\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ---------- state : :class:`numpy.ndarray` of :class:`numpy.complex128`",
"changing this value could increase performance for your GPU. See `Achieved Occupancy`_ for",
"algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and defaults to",
"absolute value of. Returns * **az** (:class:`numpy.float64`) - The absolute value of z.",
"system. device : :obj:`Device` The option to select which device will be targeted",
"(time_index, spatial_direction) The expected spin projection (Bloch vector) over time. This is calculated",
"models. device : :obj:`Device` The option to select which device will be targeted",
"of :obj:`numpy.float64` (start/end)) - The time offset that the experiment is to start",
"0]) field_sample[2, 1] = math.tau*time_step_integration*(w1*field_sample[0, 1] + w0*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w1*field_sample[0,",
"Options for describing which method is used during the integration. Parameters ---------- value",
"np.complex128) elif device_index == 2: time_evolution_fine_group = roc.shared.array((threads_per_block, dimension, dimension), dtype = np.complex128)",
"= \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: * **z** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index))",
"of the object. Attributes ---------- conj(z) : :obj:`callable` Conjugate of a complex number.",
"written to. * **trotter_cutoff** (:obj:`int`) - The number of squares to make to",
":func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start",
"(1.5 - sqrt3)/6 field_sample[2, 0] = math.tau*time_step_integration*(w0*field_sample[0, 0] + w1*field_sample[1, 0]) field_sample[2, 1]",
"dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype",
"1] = 1 @jit_device def set_to_zero(operator): operator[0, 0] = 0 operator[1, 0] =",
"+ (self.threads_per_block - 1)) // self.threads_per_block try: self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output,",
"faster running threads, and changing this value could increase performance for your GPU.",
"use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, trotter_cutoff = 32, threads_per_block = 64, max_registers",
"time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 1: # Run calculation for",
"be filled with the result of the product. adjoint(operator) : :obj:`callable` Takes the",
"result[1, 0] = conj(operator[0, 1]) result[0, 1] = conj(operator[1, 0]) result[1, 1] =",
"between each time step. See :ref:`architecture` for some information. state : :obj:`numpy.ndarray` of",
"2] = 0 operator[2, 2] = 1 @jit_device def set_to_zero(operator): operator[0, 0] =",
"== IntegrationMethod.MAGNUS_CF4: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_magnus_cf4(sweep_parameter, time_fine,",
"a four dimensional vector, with the first three entries being x, y, z",
"\\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\ Y &= 2^{-\\\\tau}y,\\\\\\\\ Z &= 2^{-\\\\tau}z,\\\\\\\\ Q &= 2^{-\\\\tau}q,\\\\\\\\",
"spin): \"\"\" Calculate each expected spin value in parallel. For spin half: ..",
"w0 = (1.5 + sqrt3)/6 w1 = (1.5 - sqrt3)/6 field_sample[2, 0] =",
"optimal value of `max_registers` for each model of GPU running :mod:`spinsim`, balancing more",
"field_sample = np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128)",
"one matrix into another. .. math:: (A)_{i, j} = (B)_{i, j} Parameters: *",
"jit get_field function into a device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number, device,",
"numba as nb from numba import cuda from numba import roc import math",
"the rating wave approximation: just define `get_field()` with field functions that use the",
"3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration =",
"transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame",
"CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to compile the",
"dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :],",
"timeseries of the 3 level atom. Parameters ---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128`",
"compile the integrator to run on an Nvidia cuda compatible GPU, in parallel.",
"= conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample, result): x = field_sample[0] y = field_sample[1]",
"progress, not currently functional! \"\"\" class Results: \"\"\" The results of a an",
"func return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu_single\": def jit_host(template, max_registers):",
"using the Lie Trotter theorem. \"\"\" class Device(Enum): \"\"\" The target device that",
"(B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix",
"jit_host(template, max_registers): def jit_host(func): return func return jit_host self.jit_host = jit_host def jit_device(func):",
"\\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to left",
"frame rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_step_output) + 1j*math.sin(math.tau*rotating_wave*time_step_output) time_evolution_coarse[time_index, 0, 0] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 0,",
"1]) result[2, 0] = conj(operator[0, 2]) result[0, 1] = conj(operator[1, 0]) result[1, 1]",
"step. Measured in s. * **time_step_output** (:obj:`float`) - The sample resolution of the",
"exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to the exitsing time evolution",
"operator[1, 2]*operator[2, 0] result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2, 1]*operator[1, 0]",
"def __init__(self, get_field, spin_quantum_number, device = None, exponentiation_method = None, use_rotating_frame = True,",
"= 3 sample_index_end = 4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max = 3 sample_index_end",
"0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2]",
"time_evolution self.state = state self.spin_calculator = spin_calculator def __getattr__(self, attr_name): if attr_name ==",
"linear combination of a subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A &=",
"= conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[2, 0] = conj(operator[0, 2])",
"operator in the z direction. spin_calculator : :obj:`callable` Calculates the expected spin projection",
"matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host",
"matrix_multiply(temporary, temporary, result) self.conj = conj self.complex_abs = complex_abs self.norm2 = norm2 self.inner",
"A text label that can be used for archiving. \"\"\" MAGNUS_CF4 = \"magnus_cf4\"",
"rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128) elif device_index == 1: time_evolution_fine = cuda.local.array((dimension,",
"z /= r c = math.cos(r/2) s = math.sin(r/2) result[0, 0] = c",
"machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on object that contains definitions of",
"= append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index,",
"2] = operator[2, 2] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0]",
"&= \\\\sin(\\\\theta). \\\\end{align*} **For spin one systems** Assumes the exponent is an imaginary",
"jit_device_template(func): return nb.njit(template)(func) return jit_device_template self.jit_device_template = jit_device_template elif value == \"cpu\": def",
"that the experiment is to start at. Measured in s. time_end : :obj:`float`",
"(y_index, x_index)) - An array to write the resultant adjoint to. matrix_exponential_analytic(field_sample, result)",
"2] = 0 @jit_device def matrix_multiply(left, right, result): result[0, 0] = left[0, 0]*right[0,",
"used during the integration. Parameters ---------- value : :obj:`str` A text label that",
"constrution. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state",
"dimension : :obj:`int` Dimension of the hilbert space the states with this spin",
"2^{-\\\\tau}q,\\\\\\\\ c_{\\\\theta} &= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated, it",
"state = np.empty((time_index_max, self.spin_quantum_number.dimension), np.complex128) self.get_state(state_init, state, time_evolution_coarse) results = Results(time, time_evolution_coarse, state,",
"operator[0, 0] result[1, 0] = operator[1, 0] result[2, 0] = operator[2, 0] result[0,",
"testing. Note that one extra register per thread is always added to the",
"spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) //",
"range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) else: @jit_device def norm2(z):",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`0`. matrix_multiply(left,",
"time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1])",
"rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] = X.imag",
"physics definition, so the left vector is conjugated. Thus the inner product of",
"2].imag**2 elif device_index > 0: if device_index == 1: time_index = cuda.grid(1) elif",
"precision = 4**hyper_cube_amount a = math.sqrt(field_sample[0]*field_sample[0] + field_sample[1]*field_sample[1]) if a > 0: ep",
"& \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2 + y^2 +",
"1]*right[1, 1] + left[0, 2]*right[2, 1] result[1, 1] = left[1, 0]*right[0, 1] +",
"time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the stepwise time evolution opperator. Parameters",
"by the user. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The",
"(cx*cy + 1j*sx*sy)*cisz # if device_index == 0: # temporary = np.empty((2, 2),",
"- time_coarse rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[1,",
"this device option, the user defined field function must be :func:`numba.cuda.jit()` compilable. See",
"& 0 \\\\end{pmatrix},& J_z &= \\\\frac{1}{2}\\\\begin{pmatrix} 1 & 0 \\\\\\\\ 0 & -1",
"+ operator[1, 1])*operator[1, 2] + operator[1, 2]*operator[2, 2] result[2, 2] = operator[2, 0]*operator[0,",
"operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2] result[1, 2] = operator[1, 0]*operator[0, 2]",
"= field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding =",
"rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) field_sample[2, 0] = math.tau*time_step_integration*field_sample[0,",
"to take the conjugate of. Returns * **cz** (:class:`numpy.complex128`) - The conjugate of",
"compiled for use on the target device) used in the integrator. These device",
"field function must be :func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python",
"to be filled with the result of the product. adjoint(operator) : :obj:`callable` Takes",
"See :obj:`IntegrationMethod` for more details. trotter_cutoff : :obj:`int` The number of squares made",
"- c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z",
"operator to take the adjoint of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"np.empty((2, 2), dtype = np.complex128) # elif device_index == 1: # temporary =",
"text label that can be used for archiving. \"\"\" def __init__(self, value, dimension,",
"approximation: just define `get_field()` with field functions that use the rotating wave approximation",
"wave approximation, a technique used to get approximate analytic solutions of spin system",
"1 result[1, 0] = 0 result[0, 1] = 0 result[1, 1] = 1",
"step, one sampling the field from the end of the time step. The",
"# hyper_cube_amount = 0 # precision = 4**hyper_cube_amount # x = field_sample[0]/(2*precision) #",
"__init__(self, value, index): super().__init__() self._value_ = value self.index = index if value ==",
"an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*}",
"a quantum state. Used to calculate `spin` the first time it is referenced",
"@staticmethod @nb.njit def get_state(state_init, state, time_evolution): \"\"\" Use the stepwise time evolution operators",
"to increase execution time for different GPU models. device : :obj:`Device` The option",
"The expected spin projection (Bloch vector) over time. \"\"\" self.time = time self.time_evolution",
"= device.jit_device_template device_index = device.index dimension = spin_quantum_number.dimension lie_dimension = dimension + 1",
"complex vectors. .. note:: The mathematics definition is used here rather than the",
":obj:`numpy.float128` (time_index, y_index, x_index) The evaluated time evolution operator between each time step.",
"associated with the use of a rotating wave approximation, a technique used to",
"to sample the field at, in units of s. * **simulation_index** (:obj:`int`) -",
"frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves into",
"float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave,",
"Measured in s. The duration of the experiment is `time_end - time_start`. time_step_integration",
"np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse)",
"\"one\") \"\"\" For three level systems. \"\"\" class IntegrationMethod(Enum): \"\"\" Options for describing",
"result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) @jit_device def matrix_exponential_analytic(field_sample,",
"field_sample[2, 0] = math.tau*time_step_integration*field_sample[0, 0]/2 field_sample[2, 1] = math.tau*time_step_integration*field_sample[0, 1]/2 field_sample[2, 2] =",
"& 0 \\\\\\\\ i & 0 & -i \\\\\\\\ 0 & i &",
"time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index == 0: time = np.empty(time_index_max, np.float64)",
"+ left[2, 2]*right[2, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1,",
":] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype = np.float64) field_sample = field_sample_group[roc.get_local_id(1), :,",
"sx = math.sin(x) # cy = math.cos(y) # sy = math.sin(y) # cisz",
"left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] @jit_device def matrix_square_residual(operator, result): result[0, 0]",
"\\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) + \\\\psi_{-1}(t))\\\\\\\\ -\\\\Im(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t) - \\\\psi_{-1}(t))\\\\\\\\ |\\\\psi_{+1}(t)|^2 - |\\\\psi_{-1}(t)|^2 \\\\end{pmatrix} \\\\end{align*} Parameters ----------",
"1j*sx*sy) # result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2] = 0.5*cisz*(cx",
"the inner product. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to",
"the integrator is being compiled for. See :class:`Device` for more information and links.",
"timestep in parallel time_index = cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output,",
"function into a device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame =",
"(only appearing, and required, in spin one systems). .. note:: This function must",
"0 operator[0, 2] = 0 operator[1, 2] = 0 operator[2, 2] = 0",
"3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method",
"otherwise. See :obj:`Device` for all options and more details. get_time_evolution_raw : :obj:`callable` The",
"== 0: time_evolution_fine = np.empty((dimension, dimension), dtype = np.complex128) field_sample = np.empty((sample_index_max, lie_dimension),",
"each model of GPU running :mod:`spinsim`, balancing more threads vs faster running threads,",
"numba import cuda from numba import roc import math sqrt2 = math.sqrt(2) sqrt3",
"rotating frame. One can, of course, use :mod:`spinsim` to integrate states in the",
"0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 + operator[2, 2])*operator[2, 2] @jit_device",
"= np.empty((3, 3), dtype = np.complex128) # elif device_index == 1: # temporary",
"exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) #",
"in s. time_end : :obj:`float` The time that the experiment is to finish",
":obj:`numpy.complex128` (time_index, magnetic_quantum_number)) - The quantum state of the spin system over time,",
"value for hyperfine spin of the spin system in the lab frame, for",
"0: time = np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time,",
"four dimensional vector, with the first three entries being x, y, z spatial",
"left[1, 0]*right[0, 0] + left[1, 1]*right[1, 0] + left[1, 2]*right[2, 0] result[2, 0]",
"elif device_index == 1: time_evolution_fine = cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample =",
"& \\\\frac{e^{-i(Z - \\\\frac{Q}{3})} (-i s_X - c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X",
"matrix to copy from. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The",
"result, temporary) # matrix_multiply(temporary, temporary, result) self.conj = conj self.complex_abs = complex_abs self.norm2",
"operator[1, 1] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0",
"c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &=",
":obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated time evolution operator between each time",
"\\\\cdot r &= \\\\sum_i (l_i)^* r_i \\\\end{align*} Parameters: * **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`,",
"result[0, 0] = 1 result[1, 0] = 0 result[0, 1] = 0 result[1,",
"+ 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :],",
"j} = 0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"= field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_half_rotating(field_sample,",
"eq = 1 + 1j*eq result[0, 0] = (Ca/(eq*ez))*(Ca/(eq*ez)) - 1 result[1, 0]",
"1]*right[1, 1] result[1, 1] = left[1, 0]*right[0, 1] + left[1, 1]*right[1, 1] @jit_device",
"coarse timestep in parallel time_index = roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse,",
"= operator[2, 0]*operator[0, 1] + operator[2, 1]*operator[1, 1] + (2 + operator[2, 2])*operator[2,",
"-i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2} \\\\\\\\",
"return math.sqrt(z[0].real**2 + z[0].imag**2 + z[1].real**2 + z[1].imag**2 + z[2].real**2 + z[2].imag**2) @jit_device",
":func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for",
"from . import utilities from enum import Enum import numpy as np import",
"such approximations are made, and the output state in given out of the",
"See :ref:`architecture` for some information. state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The",
"device_index == 0: temporary = np.empty((3, 3), dtype = np.complex128) elif device_index ==",
"time.copy_to_host() elif self.device == Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension,",
"elif device_index == 1: # temporary = cuda.local.array((3, 3), dtype = np.complex128) #",
"- no such approximations are made, and the output state in given out",
"max_registers) def get_time_evolution(sweep_parameter, time_coarse, time_end_points, time_step_integration, time_step_output, time_evolution_coarse): \"\"\" Find the stepwise time",
"written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a matrix exponential based on the",
"0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] + left[0, 2]*right[2, 0]",
"conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0] =",
"0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2, 2] result[1, 2] =",
"the exponent is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(2)`, being,",
"device_index == 1: time_evolution_old = cuda.local.array((dimension, dimension), dtype = np.complex128) elif device_index ==",
"\\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i",
"device, threads_per_block): \"\"\" Parameters ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether",
"of :class:`numpy.complex128`, (index)) - The vector to take the 2 norm of. Returns",
"@jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] = 0 operator[0, 1]",
"temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0]",
"get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter): # Declare variables if device_index ==",
"*= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *= rotating_wave_winding[0]",
"a parameter that can be swept over when multiple simulations need to be",
"allocates more registers (fast memory) to each thread, out of a maximum number",
"= field_sample[1] z = field_sample[2] r = math.sqrt(x**2 + y**2 + z**2) if",
"your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" if not device:",
"+ operator[0, 0])*operator[0, 1] + operator[0, 1]*operator[1, 1] result[1, 1] = operator[1, 0]*operator[0,",
"integration_method == IntegrationMethod.MIDPOINT_SAMPLE: sample_index_max = 1 sample_index_end = 1 exponentiation_method_index = exponentiation_method.index if",
"of a subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x",
"J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau}",
"a complex vector. .. math:: \\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 +",
"method. \"\"\" HALF_STEP = \"half_step\" \"\"\" Integration method from AtomicPy. Makes two Euler",
"operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 1 @jit_device",
"(A)_{i, j} = 0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
":obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system. device : :obj:`Device` The option to",
"0 & 0 & 0 \\\\\\\\ 0 & 0 & -1 \\\\end{pmatrix},& J_q",
"are available for the GPU model, the GPU must run fewer threads concurrently",
"time for a given time series of a quantum state. This :obj:`callable` is",
"just in time if the `spin` property is needed. Compiled for chosen device",
"the conjugate of. Returns * **cz** (:class:`numpy.complex128`) - The conjugate of z. complex_abs(z)",
"1 if device_index == 0: temporary = np.empty((2, 2), dtype = np.complex128) elif",
"1])*(state[time_index, 0] - state[time_index, 2])/sqrt2).real spin[time_index, 2] = state[time_index, 0].real**2 + state[time_index, 0].imag**2",
"= 0 operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1] =",
"use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`, or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. \"\"\"",
":class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, state_index) The state (wavefunction) of the spin system in",
"operator[1, 0] = 0 operator[0, 1] = 0 operator[1, 1] = 0 @jit_device",
"self.jit_device_template = jit_device_template elif value == \"roc\": def jit_host(template, max_registers): def jit_host(func): return",
"operator[2, 0] result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1] result[2,",
"\"\"\" Analytic expression of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only.",
"def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method = IntegrationMethod.MAGNUS_CF4, exponentiation_method =",
"e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y",
"atom. Parameters ---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin wavefunction) of",
"temporary_group = roc.shared.array((threads_per_block, 3, 3), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :,",
"// threads_per_block get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution",
"the time evolution operator in parallel. Compiled for chosen device on object constrution.",
"The vector to take the 2 norm of. Returns * **nz** (:class:`numpy.float64`) -",
"w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif",
"(1/2, 2, \"half\") \"\"\" For two level systems. \"\"\" ONE = (1, 3,",
"numba could not jit get_field function into a device function.\\033[0m\\n\") raise def compile_time_evolver(self,",
"is used to define the bias field strength in `get_field`, then one can",
"else: ep = 1 a = a/precision Ca = math.cos(a/2) Sa = math.sin(a/2)",
"the fourth entry being the amplitude of the quadratic shift (only appearing, and",
"elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample, time_evolution_fine, trotter_cutoff) # Premultiply to the exitsing time",
"**operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy from. *",
"time evolution operator between each time step. See :ref:`architecture` for some information. state",
"calculated as .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x - iy J_y -",
"for z_index in range(state.shape[1]): state[time_index, x_index] += time_evolution[time_index - 1, x_index, z_index]*state[time_index -",
"1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number != SpinQuantumNumber.HALF): print(\"\\033[31mspinsim",
"1 # utility_set = spin_quantum_number.utility_set if not exponentiation_method: if spin_quantum_number == SpinQuantumNumber.ONE: exponentiation_method",
"the experiment is to start at. Measured in s. time_end : :obj:`float` The",
"simulations, sweeping through bias values, by calling this method multiple times, each time",
"= operator[1, 1] @jit_device def set_to_one(operator): operator[0, 0] = 1 operator[1, 0] =",
"the integrator, ie, don't compile the integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\"",
"jit_device def jit_device_template(template): def jit_device_template(func): return func return jit_device_template self.jit_device_template = jit_device_template elif",
"= operator[0, 0] result[1, 0] = operator[1, 0] result[0, 1] = operator[0, 1]",
"LLVM compiler to compile the integrator to run on all CPU cores, in",
"i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) & -\\\\frac{y + ix}{r}\\\\sin(\\\\frac{r}{2})\\\\\\\\ \\\\frac{y - ix}{r}\\\\sin(\\\\frac{r}{2}) & \\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix}",
"take the 2 norm of. Returns * **nz** (:class:`numpy.float64`) - The 2 norm",
"The use of a rotating frame is commonly associated with the use of",
"the exponential if exponentiation_method_index == 0: matrix_exponential_analytic(field_sample, time_evolution_fine) elif exponentiation_method_index == 1: matrix_exponential_lie_trotter(field_sample,",
"# elif device_index == 1: # temporary = cuda.local.array((2, 2), dtype = np.complex128)",
"for different GPU models. device : :obj:`Device` The option to select which device",
"operator[0, 1]*operator[1, 1] + operator[0, 2]*operator[2, 1] result[1, 1] = operator[1, 0]*operator[0, 1]",
"Which integration method to use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod`",
"(\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\",
"the number of threads (workitems) they each contain, when running on the GPU",
"= math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez)",
"- 1j*z*s result[1, 0] = (y - 1j*x)*s result[0, 1] = -(y +",
"= time evolution * previous state for x_index in nb.prange(state.shape[1]): state[time_index, x_index] =",
"0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2) else: spin[time_index,",
"= \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*} For spin one: ..",
"inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0,",
"exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba could not jit get_field function",
"+= 1 # @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2)",
"append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:, :, :], float64)\") def get_time_evolution_loop(time_index, time_coarse,",
"1]*operator[1, 1] + operator[0, 2]*operator[2, 1] result[1, 1] = operator[1, 0]*operator[0, 1] +",
"ca = math.cos(a) sa = -1j*math.sin(a)/sqrt2 ez = field_sample[2]/(2*precision) ez = math.cos(ez) +",
"0]*right[0, 2] + left[0, 1]*right[1, 2] + left[0, 2]*right[2, 2] result[1, 2] =",
"---------- state : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state of the",
"specific GPU model. Defaults to 63 (optimal for GTX1070, the device used for",
"See `Supported CUDA Python features`_ for compilable python features. \"\"\" ROC = (\"roc\",",
"= 0 result[1, 1] = 1 @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount =",
"< spin.shape[0]: if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index,",
"is to finish at. Measured in s. The duration of the experiment is",
"time evolution operator between each time step. See :ref:`architecture` for some information. spin_calculator",
"of the product. adjoint(operator) : :obj:`callable` Takes the hermitian adjoint of a matrix.",
"set_to_zero = utilities.set_to_zero matrix_multiply = utilities.matrix_multiply adjoint = utilities.adjoint matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter",
"set to :obj:`SpinQuantumNumber.HALF`. See :obj:`ExponentiationMethod` for more details. use_rotating_frame : :obj:`bool` Whether or",
"from the start of the time step, one sampling the field from the",
"is to start at, and the time that the experiment is to finish",
"**time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated time evolution operator",
"exponentiation in the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to",
"\\\\\\\\ 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i",
"field_sample[0]/precision # y = field_sample[1]/precision # z = field_sample[2]/precision # q = field_sample[3]/precision",
"more details. trotter_cutoff : :obj:`int` The number of squares made by the matrix",
"return nb.njit(template)(func) return jit_host self.jit_host = jit_host def jit_device(func): return nb.njit()(func) self.jit_device =",
"GPU model, the GPU must run fewer threads concurrently than it has Cuda",
"2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[1, 3]/2",
"= utilities.inner set_to = utilities.set_to set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero matrix_multiply =",
"matrix_exponential_analytic = utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device = device.jit_device jit_device_template",
"to write the resultant adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)`",
"== 0: time = np.empty(time_index_max, np.float64) time_evolution_coarse = np.empty((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter,",
"conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0] =",
"**spin** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector)",
"the adjoint of. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - An array",
"+ 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample +=",
"self.jit_device_template = jit_device_template elif value == \"cuda\": def jit_host(template, max_registers): def jit_host(func): return",
"/= 2 # For every fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration + 0.5)):",
"to define the bias field strength in `get_field`, then one can run many",
"\"\"\" Options for describing which method is used during the integration. Parameters ----------",
"to the `get_field` function supplied by the user. Modifies the field function so",
"a whole number multiple of `time_step_integration`. Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of",
"LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation using the Lie Trotter theorem. \"\"\" class",
"given time series of a quantum state. Used to calculate `spin` the first",
"if r > 0: x /= r y /= r z /= r",
"half systems. Will not work with spin one systems. Assumes the exponent is",
"- c_X s_Y)}{\\\\sqrt{2}} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y - i s_Xs_Y)}{2} &",
"will on average take smaller steps. .. note :: The use of a",
"= math.tau*time_step_integration*field_sample[1, 1]/2 field_sample[2, 2] = math.tau*time_step_integration*field_sample[1, 2]/2 if dimension > 2: field_sample[2,",
"quantum system. threads_per_block : :obj:`int` The size of each thread block (workgroup), in",
"time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0] =",
"elif self.device == Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse = cuda.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension),",
"Resetting time_step_integration to {time_step_output/round(time_step_output/time_step_integration):8.4e}.\\033[0m\\n\") time_step_integration = time_step_output/round(time_step_output/time_step_integration) time_end_points = np.asarray([time_start, time_end], np.float64) state_init",
"a device function.\\033[0m\\n\") raise def compile_time_evolver(self, get_field, spin_quantum_number, device, use_rotating_frame = True, integration_method",
"is_Xs_Y) e^{-iZ} & -(c_Xs_Y + is_Xc_Y) e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} &",
"= math.cos(z + q/3) - 1j*math.sin(z + q/3) # result[0, 0] = 0.5*cisz*(cx",
"0] = c - 1j*z*s result[1, 0] = (y - 1j*x)*s result[0, 1]",
": :obj:`float` time_coarse : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index) A coarse grained list of",
"the time step. The equivalent of the trapezoidal method. \"\"\" class ExponentiationMethod(Enum): \"\"\"",
"/= r c = math.cos(r/2) s = math.sin(r/2) result[0, 0] = c -",
"is to start at. Measured in s. time_end : :obj:`float` The time that",
"// threads_per_block get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif device == Device.ROC: spin",
"that the integrator is being compiled for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html ..",
"**trotter_cutoff** (:obj:`int`) - The number of squares to make to the approximate matrix",
"for your GPU. See `Achieved Occupancy`_ for Nvidia's official explanation. \"\"\" if not",
"rating wave approximation: just define `get_field()` with field functions that use the rotating",
"integrator. \"\"\" MIDPOINT_SAMPLE = \"midpoint_sample\" \"\"\" Euler integration method. \"\"\" HALF_STEP = \"half_step\"",
"dtype = np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype",
"conj(left[0])*right[0] + conj(left[1])*right[1] + conj(left[2])*right[2] @jit_device def set_to(operator, result): result[0, 0] = operator[0,",
"maximum number of registers allocated per thread when using :obj:`Device.CUDA` as the target",
"& \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*}",
"math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 - |\\\\psi_{-\\\\frac{1}{2}}(t)|^2) \\\\end{pmatrix} \\\\end{align*}",
"i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\",
"and z respectively, as described above. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
":] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype = np.complex128) rotating_wave_winding = rotating_wave_winding_group[roc.get_local_id(1), :] time_coarse[time_index]",
"jit_host self.jit_host = jit_host def jit_device(func): return cuda.jit(device = True, inline = True)(func)",
"in parallel. .. warning :: Work in progress, not currently functional! \"\"\" class",
"spin half: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ -\\\\Im(\\\\psi_{+\\\\frac{1}{2}}(t)\\\\psi_{-\\\\frac{1}{2}}(t)^*)\\\\\\\\ \\\\frac{1}{2}(|\\\\psi_{+\\\\frac{1}{2}}(t)|^2 -",
"for each time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray` of :class:`numpy.float64`",
"cy + 1j*sx*sy) # result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2]",
"= cisz*(sy - 1j*cy*sx)/sqrt2 # cisz = math.cos(z - q/3) + 1j*math.sin(z -",
"s. time_step_output : :obj:`float` The time difference between each element of `time_coarse`. In",
"time_evolution_coarse, state, self.spin_calculator) return results @staticmethod @nb.njit def get_state(state_init, state, time_evolution): \"\"\" Use",
"inner(left, right): return conj(left[0])*right[0] + conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0, 0] =",
"def get_state(state_init, state, time_evolution): \"\"\" Use the stepwise time evolution operators in succession",
"Modifying might be able to increase execution time for different GPU models. device",
"\\\\begin{align*} A &= -i(x J_x + y J_y + z J_z), \\\\end{align*} with",
"time_coarse) rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :])",
"= 64, max_registers = 63): \"\"\" Compiles the integrator and spin calculation functions",
"operator[0, 2] result[1, 2] = operator[1, 2] result[2, 2] = operator[2, 2] @jit_device",
"inline = True)(func) self.jit_device = jit_device def jit_device_template(template): def jit_device_template(func): return cuda.jit(template, device",
"of each thread block (workgroup), in terms of the number of threads (workitems)",
"Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1))",
"that the experiment is to start at, and the time that the experiment",
"\\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as,",
"Makes two Euler integration steps, one sampling the field from the start of",
"of :class:`numpy.float64` (time_index) A coarse grained list of time samples that the time",
"operator in the z direction. Returns ------- results : :obj:`Results` An object containing",
"+ 1j*field_sample[1])/(rotating_wave_winding**2) field_sample[0] = X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - 2*rotating_wave",
"modified to increase the execution speed for a specific GPU model. Defaults to",
"z J_z + q J_q), \\\\end{align*} with .. math:: \\\\begin{align*} J_x &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix}",
"range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary, result) result[0, 0] += 1 result[1, 1] += 1",
"The size of each thread block (workgroup), in terms of the number of",
"write the resultant adjoint to. matrix_exponential_analytic(field_sample, result) : :obj:`callable` Calculates a :math:`\\\\mathfrak{su}(2)` matrix",
"= sa*eq*ez*ep result[0, 2] = -((Sa*eq/ep)*(Sa*eq/ep)) result[1, 2] = sa*eq*ez/ep result[2, 2] =",
"first time it is referenced by the user. Parameters: * **state** (:obj:`numpy.ndarray` of",
"compilable python features. \"\"\" ROC = (\"roc\", 2) \"\"\" Use the :func:`numba.roc.jit()` LLVM",
"1]*right[1, 2] + left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator, result): result[0, 0] =",
"dimension), dtype = np.complex128) elif device_index == 1: time_evolution_old = cuda.local.array((dimension, dimension), dtype",
"use an analytic exponentiation method outside of spin half. Switching to a Lie",
"= np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype",
"a rotating frame is commonly associated with the use of a rotating wave",
"device: if cuda.is_available(): device = Device.CUDA else: device = Device.CPU self.threads_per_block = threads_per_block",
"0] = sa*eq*ep/ez result[2, 0] = -((Sa*ep/eq)*(Sa*ep/eq)) result[0, 1] = sa*eq/(ez*ep) result[1, 1]",
": :obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The evaluated quantum state of the spin",
"= 0 operator[1, 2] = 0 operator[2, 2] = 0 @jit_device def matrix_multiply(left,",
"0 \\\\\\\\ 0 & -1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be calculated",
"IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine, time_coarse,",
"append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse, field_sample, time_step_integration, rotating_wave, rotating_wave_winding): transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave,",
":math:`\\\\mathfrak{su}(2)` matrix exponential based on its analytic form. .. warning:: Only available for",
"conj(operator[0, 1]) result[2, 0] = conj(operator[0, 2]) result[0, 1] = conj(operator[1, 0]) result[1,",
"operator between each time step. See :ref:`architecture` for some information. state : :obj:`numpy.ndarray`",
"to 64. Modifying might be able to increase execution time for different GPU",
"1] = Ca*ez - 1 if device_index == 0: temporary = np.empty((2, 2),",
"0]*right[0, 2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator,",
"Simulator: \"\"\" Attributes ---------- spin_quantum_number : :obj:`SpinQuantumNumber` The option to select whether the",
"= math.cos(z - q/3) + 1j*math.sin(z - q/3) # result[0, 2] = 0.5*cisz*(cx",
"get_spin[blocks_per_grid, threads_per_block](roc.to_device(state), spin) spin = spin.copy_to_host() return spin self.get_time_evolution_raw = get_time_evolution self.spin_calculator =",
"by the user. Modifies the field function so the integrator can be used",
"1] result[2, 1] = operator[2, 1] result[0, 2] = operator[0, 2] result[1, 2]",
"x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &=",
"time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 1: # Run calculation for each coarse",
"over time. \"\"\" if device.index == 0: spin = np.empty((state.shape[0], 3), np.float64) get_spin(state,",
": :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The expected value for hyperfine spin of",
"math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on object that contains definitions",
"\\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & 0",
"\\\\begin{align*} (A)_{i, j} = 0 \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index,",
"\\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X +",
"left[1, 2]*right[2, 2] result[2, 2] = left[2, 0]*right[0, 2] + left[2, 1]*right[1, 2]",
"+ w1*field_sample[1, 1]) field_sample[2, 2] = math.tau*time_step_integration*(w0*field_sample[0, 2] + w1*field_sample[1, 2]) if dimension",
"and more details. get_time_evolution_raw : :obj:`callable` The internal function for evaluating the time",
"---------- get_field : :obj:`callable` A python function that describes the field that the",
"Attributes ---------- conj(z) : :obj:`callable` Conjugate of a complex number. .. math:: \\\\begin{align*}",
"4**hyper_cube_amount # x = field_sample[0]/(2*precision) # y = field_sample[1]/(2*precision) # z = field_sample[2]/(2*precision)",
"= spin_calculator def __getattr__(self, attr_name): if attr_name == \"spin\": spin = self.spin_calculator(self.state) setattr(self,",
"- The matrix to set to :math:`0`. matrix_multiply(left, right, result) : :obj:`callable` Multiply",
"the lab frame, for each time sampled. Units of :math:`\\\\hbar`. This is an",
"time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 1: # Run calculation for each",
"integrator to run on all CPU cores, in parallel. .. note :: To",
"to set to :math:`0`. matrix_multiply(left, right, result) : :obj:`callable` Multiply matrices left and",
"= np.complex128) elif device_index == 1: temporary = cuda.local.array((2, 2), dtype = np.complex128)",
"0]*right[0, 0] + left[2, 1]*right[1, 0] + left[2, 2]*right[2, 0] result[0, 1] =",
"of :obj:`numpy.float64` (time_index, spatial_direction)) - The expected spin projection (Bloch vector) over time.",
"e^{iZ} \\\\\\\\ (c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &=",
"\\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y - iz J_z))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau}",
"jit_device_template elif value == \"cpu\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template, parallel",
"sample_index_end = 1 exponentiation_method_index = exponentiation_method.index if (exponentiation_method == ExponentiationMethod.ANALYTIC) and (spin_quantum_number !=",
"the stepwise time evolution opperator. Parameters ---------- sweep_parameter : :obj:`float` time_coarse : :class:`numpy.ndarray`",
"iy J_y - iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau}",
"one sampling the field from the start of the time step, one sampling",
"= np.complex128) # elif device_index == 2: # temporary_group = roc.shared.array((threads_per_block, 3, 3),",
"The state (spin wavefunction) of the system at the start of the simulation.",
"time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = ((time_fine + 0.5*time_step_integration*(1 - 1/sqrt3))",
"\\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} Once :math:`T` is calculated, it is then recursively",
"The expected spin projection (Bloch vector) over time. \"\"\" def __init__(self, get_field, spin_quantum_number,",
": :obj:`int` The size of each thread block (workgroup), in terms of the",
":]) rotating_wave = field_sample[0, 2] if dimension == 2: rotating_wave /= 2 #",
"------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected spin projection (Bloch",
"values, by calling this method multiple times, each time varying `sweep_parameter`. time_start :",
"the integration algorithm. Defaults to :obj:`ExponentiationMethod.LIE_TROTTER` when `spin_quantum_number` is set to :obj:`SpinQuantumNumber.ONE`, and",
"transform_frame(field_sample[0, :], rotating_wave, rotating_wave_winding[0]) field_sample[0, 0] = math.tau*time_step_integration*field_sample[0, 0] field_sample[0, 1] = math.tau*time_step_integration*field_sample[0,",
"(:obj:`float`) - The integration time step. Measured in s. * **time_step_output** (:obj:`float`) -",
"\\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2} & \\\\frac{e^{i\\\\frac{2Q}{3}} (-s_Y -i c_Y s_X)}{\\\\sqrt{2}} &",
"3), np.float64) get_spin(state, spin) elif device == Device.CUDA: spin = cuda.device_array((state.shape[0], 3), np.float64)",
"0] + left[0, 1]*right[1, 0] result[1, 0] = left[1, 0]*right[0, 0] + left[1,",
"range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) self.conj = conj self.complex_abs",
"2] + left[2, 1]*right[1, 2] + left[2, 2]*right[2, 2] @jit_device def matrix_square_residual(operator, result):",
"defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options and more details. threads_per_block",
"1 \\\\end{pmatrix} \\\\end{align*} Then the exponential can be approximated as, for large :math:`\\\\tau`,",
"@jit_device def matrix_exponential_analytic(field_sample, result): x = field_sample[0] y = field_sample[1] z = field_sample[2]",
"See :obj:`Device` for all options and more details. get_time_evolution_raw : :obj:`callable` The internal",
"(A)_{i, j} = (B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"like enums cannot be interpreted. \"\"\" def __init__(self, value, index): super().__init__() self._value_ =",
"(-s_Y -i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X - c_Y + i s_Xs_Y)}{2}",
"Attributes ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times that `state` was",
"\"\"\" For two level systems. \"\"\" ONE = (1, 3, \"one\") \"\"\" For",
"thread when using :obj:`Device.CUDA` as the target device, and can be modified to",
"field_sample[0, 2] if dimension == 2: rotating_wave /= 2 # For every fine",
"subspace of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x + y",
"with .. math:: \\\\begin{align*} X &= \\\\frac{1}{2}2^{-\\\\tau}x,\\\\\\\\ Y &= \\\\frac{1}{2}2^{-\\\\tau}y,\\\\\\\\ Z &= \\\\frac{1}{2}2^{-\\\\tau}z,\\\\\\\\",
"(\"lie_trotter\", 1) \"\"\" Approximation using the Lie Trotter theorem. \"\"\" class Device(Enum): \"\"\"",
"z direction. Returns ------- spin : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index, spatial_direction) The expected",
"method from AtomicPy. Makes two Euler integration steps, one sampling the field from",
"if dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X =",
"eigenstates of the spin projection operator in the z direction. spin : :obj:`numpy.ndarray`",
"time values for when the experiment is to start and finishes. In units",
"Nvidia Cuda compatible, and defaults to :obj:`Device.CPU` otherwise. See :obj:`Device` for all options",
"from :func:`Simulator.evaluate()`, and is executed there just in time if the `spin` property",
"functions are compiled for the chosen target device on construction of the object.",
"(time_index, spatial_index) The expected value for hyperfine spin of the spin system in",
"the result of the product. adjoint(operator) : :obj:`callable` Takes the hermitian adjoint of",
"and r. set_to(operator, result) : :obj:`callable` Copy the contents of one matrix into",
"or declare a :class:`numpy.ndarray` using :func:`numba.cuda.device_array_like()`. time_end_points : :class:`numpy.ndarray` of :class:`numpy.float64` (start time",
"dtype = np.complex128) # elif device_index == 1: # temporary = cuda.local.array((2, 2),",
"linear combination of :math:`\\\\mathfrak{su}(2)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x +",
"np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max, lie_dimension), dtype =",
"numerical value of the spin quantum number. dimension : :obj:`int` Dimension of the",
"2]*right[2, 0] result[0, 1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] +",
"label): super().__init__() self._value_ = value self.dimension = dimension self.label = label HALF =",
"1j*x)*s result[1, 1] = c + 1j*z*s else: result[0, 0] = 1 result[1,",
"- The values of x, y and z respectively, as described above. *",
"matrix_exponential_analytic(field_sample, result, trotter_cutoff): pass @jit_device def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): hyper_cube_amount = math.ceil(trotter_cutoff/2) if",
":obj:`Results` An object containing the results of the simulation. \"\"\" if math.fabs(time_step_output/time_step_integration -",
"time for different GPU models. \"\"\" jit_device = device.jit_device device_index = device.index @jit_device",
"dimension), dtype = np.complex128) time_evolution_fine = time_evolution_fine_group[roc.get_local_id(1), :, :] field_sample_group = roc.shared.array((threads_per_block, sample_index_max,",
"time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device == Device.CUDA: time = cuda.device_array(time_index_max, np.float64) time_evolution_coarse",
"j} = (B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"* **left** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to left multiply",
"label HALF = (1/2, 2, \"half\") \"\"\" For two level systems. \"\"\" ONE",
"def get_field_integration_half_step(sweep_parameter, time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine - time_coarse",
"of `time_step_integration`. Measured in s. * **time_evolution_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float128` (time_index, y_index, x_index))",
"the exponential can be calculated as .. math:: \\\\begin{align*} \\\\exp(A) &= \\\\exp(-ix J_x",
"0 & -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\",
"of the rotating frame. One can, of course, use :mod:`spinsim` to integrate states",
"is being compiled for. .. _Supported Python features: http://numba.pydata.org/numba-doc/latest/reference/pysupported.html .. _Supported Numpy features:",
"rotating_wave_winding[0]) transform_frame(field_sample[1, :], rotating_wave, rotating_wave_winding[1]) w0 = (1.5 + sqrt3)/6 w1 = (1.5",
"= np.empty((sample_index_max, lie_dimension), dtype = np.float64) rotating_wave_winding = np.empty(sample_index_end, dtype = np.complex128) elif",
"`sweep_parameter`. * **time_coarse** (:obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index)) - The times that `state` was",
"(2 + operator[1, 1])*operator[1, 1] + operator[1, 2]*operator[2, 1] result[2, 1] = operator[2,",
"together, to be returned in result. .. math:: \\\\begin{align*} (LR)_{i,k} = \\\\sum_j (L)_{i,j}",
"then one can run many simulations, sweeping through bias values, by calling this",
"execution time for different GPU models. \"\"\" jit_device = device.jit_device device_index = device.index",
"0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse",
"result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz # result[1, 1] = (cx*cy + 1j*sx*sy)*cisz",
"x_index] = 0 if time_index > 0: for z_index in range(state.shape[1]): state[time_index, x_index]",
":obj:`callable` The absolute value of a complex number. .. math:: \\\\begin{align*} |a +",
"= jit_device_template elif value == \"cpu_single\": def jit_host(template, max_registers): def jit_host(func): return nb.njit(template)(func)",
"time_evolution): \"\"\" Use the stepwise time evolution operators in succession to find the",
"None self.get_spin_raw = None try: self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block,",
"state, spin_calculator): \"\"\" Parameters ---------- time : :obj:`numpy.ndarray` of :obj:`numpy.float64` (time_index) The times",
"2]/2 if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*field_sample[0, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine,",
"get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4 elif integration_method == IntegrationMethod.HALF_STEP: @jit_device_template(\"(float64, float64, float64, float64, float64[:,",
"simulator will integrate a spin half :obj:`SpinQuantumNumber.HALF`, or spin one :obj:`SpinQuantumNumber.ONE` quantum system.",
"cores are inactive, and the GPU is said to have less occupancy. Lowering",
"+ conj(left[1])*right[1] @jit_device def set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0]",
"= np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index == 0: time",
"spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\" LIE_TROTTER = (\"lie_trotter\", 1) \"\"\" Approximation using",
"get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:, :,",
"that can be swept over when multiple simulations need to be run. For",
"adjoint of a matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\ (A^\\\\dagger)_{y,x} &= ((A)_{x,y})^*",
"the spin system is being put under. It must have three arguments: *",
"- The matrix which the result of the exponentiation is to be written",
"http://numba.pydata.org/numba-doc/latest/cuda/cudapysupported.html \"\"\" def __init__(self, value, index): super().__init__() self._value_ = value self.index = index",
"when this option is set to :obj:`True` - no such approximations are made,",
"= cisz*cx*cy # result[2, 1] = cisz*(sy - 1j*cy*sx)/sqrt2 # cisz = math.cos(z",
"number multiple of `time_step_integration`. Measured in s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number)",
"1 operator[1, 0] = 0 operator[2, 0] = 0 operator[0, 1] = 0",
"time_step_integration, time_step_output, state_init): \"\"\" Integrates the time dependent Schroedinger equation and returns the",
"-1j*sx*cy)/cisz # result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz # result[1, 1] = (cx*cy",
"= index if value == \"python\": def jit_host(template, max_registers): def jit_host(func): return func",
"utilities from enum import Enum import numpy as np import numba as nb",
"state of the spin system over time, written in terms of the eigenstates",
"See :ref:`architecture` for some information. \"\"\" for time_index in range(state.shape[0]): # State =",
"0, 2] /= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 0] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 1] *=",
"# result[1, 1] = (cx*cy + 1j*sx*sy)*cisz # if device_index == 0: #",
"&= \\\\begin{pmatrix} 1 & 0 & 0 \\\\\\\\ 0 & 0 & 0",
".. warning:: Only available for use with spin half systems. Will not work",
"+ i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2 + y^2 + z^2}`. Parameters:",
"ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez) # eq = field_sample[3]/(6*precision) #",
"be used for many experiments, without the need for slow recompilation. For example,",
"\\\\\\\\ 0 & i & 0 \\\\end{pmatrix},\\\\\\\\ J_z &= \\\\begin{pmatrix} 1 & 0",
"dynamics. This is not done when this option is set to :obj:`True` -",
"Product Formula, .. math:: \\\\exp(A + B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c.",
"temporary = cuda.local.array((3, 3), dtype = np.complex128) # elif device_index == 2: #",
"index if value == \"python\": def jit_host(template, max_registers): def jit_host(func): return func return",
"+ 1j*sx*sy) # result[1, 2] = cisz*(-1j*sx - cx*sy)/sqrt2 # result[2, 2] =",
"in `get_field`, then one can run many simulations, sweeping through bias values, by",
"(z.real - 1j*z.imag) @jit_device def complex_abs(z): return math.sqrt(z.real**2 + z.imag**2) if spin_quantum_number ==",
"device_index == 1: # Run calculation for each coarse timestep in parallel time_index",
"= np.float64) field_sample = field_sample_group[roc.get_local_id(1), :, :] rotating_wave_winding_group = roc.shared.array((threads_per_block, sample_index_end), dtype =",
"system, written in terms of the eigenstates of the spin projection operator in",
"number specified for control, so really this number is 64). Raising this value",
"with the first three entries being x, y, z spatial directions (to model",
"------- results : :obj:`Results` An object containing the results of the simulation. \"\"\"",
"spin : :class:`numpy.ndarray` of :class:`numpy.float64` (time_index, spatial_index) The expected value for hyperfine spin",
"at. Measured in s. time_end : :obj:`float` The time that the experiment is",
"sweep_parameter, field_sample[1, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def",
"(y_index, x_index)) - The matrix to set to :math:`1`. set_to_zero(operator) : :obj:`callable` Make",
"The expected spin projection (Bloch vector) over time. \"\"\" def __init__(self, time, time_evolution,",
"are inactive, and the GPU is said to have less occupancy. Lowering the",
"all options and more details. get_time_evolution_raw : :obj:`callable` The internal function for evaluating",
"integrator. \"\"\" CPU_SINGLE = (\"cpu_single\", 0) \"\"\" Use the :func:`numba.jit()` LLVM compiler to",
"calculation functions of the simulator. Parameters ---------- get_field : :obj:`callable` A python function",
"2: rotating_wave /= 2 # For every fine step for time_fine_index in range(math.floor(time_step_output/time_step_integration",
"for time_index in nb.prange(spin.shape[0]): if dimension == 2: spin[time_index, 0] = (state[time_index, 0]*conj(state[time_index,",
"# result[1, 0] = (cx*sy -1j*sx*cy)/cisz # result[0, 1] = -(cx*sy + 1j*sx*cy)*cisz",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix to set to :math:`1`. set_to_zero(operator)",
"allocated than are available for the GPU model, the GPU must run fewer",
"J_y) \\\\exp(-i(2^{-\\\\tau} z) J_z)^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} (c_Xc_Y - is_Xs_Y) e^{-iZ} & -(c_Xs_Y +",
"coarse grained list of time samples that the time evolution operator is found",
"Ca = 1 # Sa = a/2 # ca = 1 # sa",
"temporary = cuda.local.array((2, 2), dtype = np.complex128) # elif device_index == 2: #",
"self.inner = inner self.set_to = set_to self.set_to_one = set_to_one self.set_to_zero = set_to_zero self.matrix_multiply",
"3] + w1*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) field_sample[2, 0] = math.tau*time_step_integration*(w1*field_sample[0, 0]",
"subspace of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x + y",
"i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*} with .. math:: \\\\begin{align*} X &= 2^{-\\\\tau}x,\\\\\\\\",
"Returns * **cz** (:class:`numpy.complex128`) - The conjugate of z. complex_abs(z) : :obj:`callable` The",
"self.get_time_evolution_raw[blocks_per_grid, self.threads_per_block](sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) except: print(\"\\033[31mspinsim error: numba.cuda could not",
"time_step_integration : :obj:`float` The time step used within the integration algorithm. In units",
"one :obj:`SpinQuantumNumber.ONE` quantum system. threads_per_block : :obj:`int` The size of each thread block",
"Run calculation for each coarse timestep in parallel time_index = roc.get_global_id(1) if time_index",
"0] = 0 operator[1, 0] = 0 operator[2, 0] = 0 operator[0, 1]",
"utilities.matrix_exponential_analytic matrix_exponential_lie_trotter = utilities.matrix_exponential_lie_trotter jit_host = device.jit_host jit_device = device.jit_device jit_device_template = device.jit_device_template",
"2] = operator[2, 0]*operator[0, 2] + operator[2, 1]*operator[1, 2] + (2 + operator[2,",
"result[0, 0] = left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] result[1, 0] =",
"label that can be used for archiving. \"\"\" def __init__(self, value, dimension, label):",
"= X.imag field_sample[2] = field_sample[2] - 2*rotating_wave transform_frame = transform_frame_spin_half_rotating else: @jit_device_template(\"(float64[:], float64,",
"get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\")",
"operator[1, 1] = 0 operator[2, 1] = 0 operator[0, 2] = 0 operator[1,",
"get_spin(state, spin): \"\"\" Calculate each expected spin value in parallel. For spin half:",
"-i(x J_x + y J_y + z J_z + q J_q), \\\\end{align*} with",
"get_field function into a cuda device function.\\033[0m\\n\") raise time_evolution_coarse = time_evolution_coarse.copy_to_host() time =",
"threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba could not jit get_field function into a",
"import cuda from numba import roc import math sqrt2 = math.sqrt(2) sqrt3 =",
"1] = (1j*state[time_index, 0]*conj(state[time_index, 1])).real spin[time_index, 2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2",
"== IntegrationMethod.MAGNUS_CF4: sample_index_max = 3 sample_index_end = 4 elif integration_method == IntegrationMethod.HALF_STEP: sample_index_max",
"in s. The duration of the experiment is `time_end - time_start`. time_step_integration :",
"as the target device, and can be modified to increase the execution speed",
"* **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (index)) - The vector to right multiply in",
"== IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64, float64[:, :], float64, complex128[:])\") def get_field_integration_midpoint(sweep_parameter, time_fine,",
"number, used when compiling the integrator, where higher level objects like enums cannot",
"= left[0, 0]*right[0, 0] + left[0, 1]*right[1, 0] result[1, 0] = left[1, 0]*right[0,",
"* **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The operator to take the",
"math.sin(x) # cy = math.cos(y) # sy = math.sin(y) # cisz = math.cos(z",
"2] = 0.5*(state[time_index, 0].real**2 + state[time_index, 0].imag**2 - state[time_index, 1].real**2 - state[time_index, 1].imag**2)",
"set_to(time_evolution_coarse, time_evolution_old) matrix_multiply(time_evolution_fine, time_evolution_old, time_evolution_coarse) if use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:], float64,",
"& 0 \\\\\\\\ 0 & 0 & -1 \\\\end{pmatrix},& J_q &= \\\\frac{1}{3}\\\\begin{pmatrix} 1",
"amount defined by the field in the z direction. This removes the (possibly",
"spin.copy_to_host() elif device == Device.ROC: spin = roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0]",
"0 & 0 \\\\\\\\ 0 & -2 & 0 \\\\\\\\ 0 & 0",
"sa = -1j*a/sqrt2 # ez = field_sample[2]/(2*precision) # ez = 1 + 1j*ez",
": :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) The evaluated time evolution operator between",
"result[0, 0] = conj(operator[0, 0]) result[1, 0] = conj(operator[0, 1]) result[2, 0] =",
"1 a = a/precision Ca = math.cos(a/2) Sa = math.sin(a/2) ca = math.cos(a)",
"\\\\end{align*} Then the exponential can be calculated as .. math:: \\\\begin{align*} \\\\exp(A) &=",
"time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter) elif device_index == 2: #",
"time_evolution_fine, time_evolution_coarse): if device_index == 0: time_evolution_old = np.empty((dimension, dimension), dtype = np.complex128)",
"+ w0*field_sample[1, 3]) append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_magnus_cf4 append_exponentiation_integration = append_exponentiation_integration_magnus_cf4",
": :obj:`float` The time offset that the experiment is to start at. Measured",
"= conj self.complex_abs = complex_abs self.norm2 = norm2 self.inner = inner self.set_to =",
"(y - 1j*x)*s result[0, 1] = -(y + 1j*x)*s result[1, 1] = c",
"time_end], np.float64) state_init = np.asarray(state_init, np.complex128) time_index_max = int((time_end_points[1] - time_end_points[0])/time_step_output) if self.device.index",
"from enum import Enum import numpy as np import numba as nb from",
":obj:`ExponentiationMethod` for more details. use_rotating_frame : :obj:`bool` Whether or not to use the",
"get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = ((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse)",
"and returns the quantum state of the spin system over time. Parameters ----------",
"transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X = (field_sample[0] + 1j*field_sample[1])/rotating_wave_winding field_sample[0] = X.real field_sample[1] =",
"np.complex128) temporary = temporary_group[roc.get_local_id(1), :, :] for power_index in range(hyper_cube_amount): matrix_square_residual(result, temporary) matrix_square_residual(temporary,",
"0] + operator[2, 1]*operator[1, 0] + (2 + operator[2, 2])*operator[2, 0] result[0, 1]",
"Analytic expression of the matrix exponential. For spin half :obj:`SpinQuantumNumber.HALF` systems only. \"\"\"",
"result[0, 1] = operator[0, 1] result[1, 1] = operator[1, 1] result[2, 1] =",
"*= rotating_wave_winding[0] time_evolution_coarse[time_index, 1, 1] *= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:,",
".. math:: \\\\exp(A + B) = \\\\lim_{c \\\\to \\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin",
"\"\"\" Euler integration method. \"\"\" HALF_STEP = \"half_step\" \"\"\" Integration method from AtomicPy.",
"the `get_field` function supplied by the user. Modifies the field function so the",
"the execution speed for a specific GPU model. Defaults to 63 (optimal for",
":: Work in progress, not currently functional! \"\"\" class Results: \"\"\" The results",
"*= rotating_wave_winding[0] @jit_host(\"(float64, float64[:], float64[:], float64, float64, complex128[:, :, :])\", max_registers) def get_time_evolution(sweep_parameter,",
"of :math:`\\\\mathfrak{su}(3)`, being, .. math:: \\\\begin{align*} A &= -i(x J_x + y J_y",
"register per thread is always added to the number specified for control, so",
"features, and `Supported Numpy features`_ for compilable numpy features. \"\"\" CUDA = (\"cuda\",",
"result[1, 1] = conj(operator[1, 1]) result[2, 1] = conj(operator[1, 2]) result[0, 2] =",
"another. .. math:: (A)_{i, j} = (B)_{i, j} Parameters: * **operator** (:class:`numpy.ndarray` of",
"of x, y and z (and q for spin one) respectively, as described",
"0] = Sa*ep result[0, 1] = Sa/ep result[1, 1] = Ca*ez - 1",
"target device, and can be modified to increase the execution speed for a",
"1 \\\\\\\\ 0 & 1 & 0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{\\\\sqrt{2}}\\\\begin{pmatrix} 0 &",
"difference between each element of `time_coarse`. In units of s. Determines the sample",
"1] = left[0, 0]*right[0, 1] + left[0, 1]*right[1, 1] + left[0, 2]*right[2, 1]",
"Takes the hermitian adjoint of a matrix. .. math:: \\\\begin{align*} A^\\\\dagger &\\\\equiv A^H\\\\\\\\",
"of :math:`\\\\hbar`. This is an output, so use an empty :class:`numpy.ndarray` with :func:`numpy.empty()`,",
":]) @jit_device_template(\"(complex128[:, :], complex128[:, :], float64[:, :], float64, float64, complex128[:])\") def append_exponentiation_integration_half_step(time_evolution_fine, time_evolution_coarse,",
"result) : :obj:`callable` Multiply matrices left and right together, to be returned in",
":mod:`spinsim` to integrate states in the rotating frame, using the rating wave approximation:",
"x_index)) - The values of x, y and z respectively, as described above.",
"w0*field_sample[1, 2]) if dimension > 2: field_sample[2, 3] = math.tau*time_step_integration*(w1*field_sample[0, 3] + w0*field_sample[1,",
"x_index)) - The values of x, y and z (and q for spin",
"1]) @jit_device def matrix_exponential_analytic(field_sample, result): x = field_sample[0] y = field_sample[1] z =",
"a single CPU core. .. note :: To use this device option, the",
"Euler integration method. \"\"\" HALF_STEP = \"half_step\" \"\"\" Integration method from AtomicPy. Makes",
"finishes. In units of s. time_step_integration : :obj:`float` The time step used within",
"\"\"\" ONE = (1, 3, \"one\") \"\"\" For three level systems. \"\"\" class",
"time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :])",
"= math.sin(r/2) result[0, 0] = c - 1j*z*s result[1, 0] = (y -",
"the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details. trotter_cutoff : :obj:`int`",
"= jit_device_template elif value == \"cuda\": def jit_host(template, max_registers): def jit_host(func): return cuda.jit(template,",
"`time_coarse` and `time_evolution_coarse`. time_evolution_coarse : :class:`numpy.ndarray` of :class:`numpy.complex128` (time_index, bra_state_index, ket_state_index) Time evolution",
"class Utilities: \"\"\" A on object that contains definitions of all of the",
"0: temporary = np.empty((2, 2), dtype = np.complex128) elif device_index == 1: temporary",
".. math:: \\|a + ib\\|_2 = \\\\sqrt {\\\\left(\\\\sum_i a_i^2 + b_i^2\\\\right)} Parameters: *",
"q/3) - 1j*math.sin(z + q/3) # result[0, 0] = 0.5*cisz*(cx + cy -",
"amplitude of the quadratic shift (only appearing, and required, in spin one systems).",
"designed for. * **field_sample** (:class:`numpy.ndarray` of :class:`numpy.float64` (spatial_index)) the returned value of the",
"+ 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample += time_coarse get_field_jit(time_sample, sweep_parameter, field_sample[0, :]) time_sample = ((time_fine +",
"lab frame, for each time sampled. See :math:`\\\\psi(t)` in :ref:`overview_of_simulation_method`. spin : :class:`numpy.ndarray`",
"time it is referenced by the user. Parameters: * **state** (:obj:`numpy.ndarray` of :obj:`numpy.complex128`",
"z axis by an amount defined by the field in the z direction.",
"the rotating frame. integration_method : :obj:`IntegrationMethod` Which integration method to use in the",
"of :obj:`numpy.float128` (time_index, y_index, x_index)) - The evaluated time evolution operator between each",
"spin system in the lab frame, for each time sampled. time_evolution : :class:`numpy.ndarray`",
"= roc.get_global_id(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index, time_coarse, time_step_output, time_step_integration, time_end_points, time_evolution_coarse, sweep_parameter)",
"= spin_quantum_number.dimension lie_dimension = dimension + 1 # utility_set = spin_quantum_number.utility_set if not",
"- iz J_z - iq J_q))^{2^\\\\tau}\\\\\\\\ &\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y)",
"self.spin_quantum_number.dimension), np.complex128) self.get_time_evolution_raw(sweep_parameter, time, time_end_points, time_step_integration, time_step_output, time_evolution_coarse) elif self.device == Device.CUDA: time",
"Assumes the exponent is an imaginary linear combination of a subspace of :math:`\\\\mathfrak{su}(2)`,",
"using :func:`numba.cuda.device_array_like()`. \"\"\" if device_index == 0: for time_index in nb.prange(time_coarse.size): get_time_evolution_loop(time_index, time_coarse,",
"one can run many simulations, sweeping through bias values, by calling this method",
"(time_index, magnetic_quantum_number)) - The quantum state of the spin system over time, written",
"&\\\\approx (\\\\exp(-i(2^{-\\\\tau} x) J_x) \\\\exp(-i(2^{-\\\\tau} y) J_y) \\\\exp(-i(2^{-\\\\tau} z J_z + (2^{-\\\\tau} q)",
"this device option, the user defined field function must be :func:`numba.jit()` compilable. See",
"The matrix to right multiply by. * **result** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index))",
"utilities.norm2 inner = utilities.inner set_to = utilities.set_to set_to_one = utilities.set_to_one set_to_zero = utilities.set_to_zero",
"level atom. Parameters ---------- state_init : :class:`numpy.ndarray` of :class:`numpy.complex128` The state (spin wavefunction)",
"The time values for when the experiment is to start and finishes. In",
"== \"roc\": def jit_host(template, max_registers): def jit_host(func): return roc.jit(template)(func) return jit_host self.jit_host =",
"\\\\end{pmatrix} \\\\end{align*} For spin one: .. math:: \\\\begin{align*} \\\\langle F\\\\rangle(t) = \\\\begin{pmatrix} \\\\Re(\\\\sqrt{2}\\\\psi_{0}(t)^*(\\\\psi_{+1}(t)",
"Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves into a frame",
"2] = 0.5*cisz*(cx + cy + 1j*sx*sy) # if device_index == 0: #",
"target device on construction of the object. Attributes ---------- conj(z) : :obj:`callable` Conjugate",
"(time_index, magnetic_quantum_number) The evaluated quantum state of the spin system over time, written",
"objects like enums cannot be interpreted. \"\"\" def __init__(self, value, index): super().__init__() self._value_",
"3), dtype = np.complex128) # temporary = temporary_group[roc.get_local_id(1), :, :] # for power_index",
"note :: To use this device option, the user defined field function must",
"-i c_Y s_X)}{\\\\sqrt{2}} & \\\\frac{e^{-i\\\\left(-Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y + i s_Xs_Y)}{2} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\",
"set_to(operator, result): result[0, 0] = operator[0, 0] result[1, 0] = operator[1, 0] result[2,",
"= np.complex128) elif device_index == 2: temporary_group = roc.shared.array((threads_per_block, 2, 2), dtype =",
"get_spin[blocks_per_grid, threads_per_block](cuda.to_device(state), spin) spin = spin.copy_to_host() elif device == Device.ROC: spin = roc.device_array((state.shape[0],",
"states with this spin belong to. label : :obj:`str` A text label that",
"s. * **time_step_output** (:obj:`float`) - The sample resolution of the output timeseries for",
"&= \\\\cos(\\\\theta),\\\\\\\\ s_{\\\\theta} &= \\\\sin(\\\\theta). \\\\end{align*} **For spin one systems** Assumes the exponent",
"outside of spin half. Switching to a Lie Trotter method.\\033[0m\") exponentiation_method = ExponentiationMethod.LIE_TROTTER",
"with spin one systems. Assumes the exponent is an imaginary linear combination of",
"+ operator[0, 2]*operator[2, 1] result[1, 1] = operator[1, 0]*operator[0, 1] + (2 +",
":obj:`numpy.ndarray` of :obj:`numpy.complex128` (time_index, magnetic_quantum_number) The quantum state of the spin system over",
"jit_device_template(template): def jit_device_template(func): return func return jit_device_template self.jit_device_template = jit_device_template elif value ==",
"+ 1j*z*s else: result[0, 0] = 1 result[1, 0] = 0 result[0, 1]",
"simulator. Parameters ---------- get_field : :obj:`callable` A python function that describes the field",
"0,&i\\\\neq j \\\\end{cases} \\\\end{align*} Parameters: * **operator** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) -",
"* **sweep_parameter** (:obj:`float`) - The input to the `get_field` function supplied by the",
"= (2 + operator[0, 0])*operator[0, 2] + operator[0, 1]*operator[1, 2] + operator[0, 2]*operator[2,",
"- iz J_z - iq J_q)\\\\\\\\ &= \\\\exp(2^{-\\\\tau}(-ix J_x - iy J_y -",
"dtype = np.complex128) time_evolution_old = time_evolution_old_group[roc.get_local_id(1), :, :] # Calculate the exponential if",
"rotating frame optimisation. Defaults to :obj:`True`. If set to :obj:`True`, the integrator moves",
"left multiply by. * **right** (:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - The matrix",
"exponentiation is to be written to. matrix_exponential_lie_trotter(field_sample, result) : :obj:`callable` Calculates a matrix",
"ca = 1 # sa = -1j*a/sqrt2 # ez = field_sample[2]/(2*precision) # ez",
"else: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit",
"math.sin(y) # cisz = math.cos(z + q/3) - 1j*math.sin(z + q/3) # result[0,",
".. note:: This function must be compilable for the device that the integrator",
"expected spin projection (Bloch vector) over time. \"\"\" def __init__(self, time, time_evolution, state,",
"\\\\cos(\\\\frac{r}{2}) + i\\\\frac{z}{r}\\\\sin(\\\\frac{r}{2}) \\\\end{pmatrix} \\\\end{align*} with :math:`r = \\\\sqrt{x^2 + y^2 + z^2}`.",
"1]) result[0, 1] = conj(operator[1, 0]) result[1, 1] = conj(operator[1, 1]) @jit_device def",
"is calculated just in time using the JITed :obj:`callable` `spin_calculator`. spin_calculator : :obj:`callable`",
"of l and r. set_to(operator, result) : :obj:`callable` Copy the contents of one",
"use_rotating_frame: if dimension == 3: @jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_spin_one_rotating(field_sample, rotating_wave, rotating_wave_winding): X",
"2, \"half\") \"\"\" For two level systems. \"\"\" ONE = (1, 3, \"one\")",
":func:`numba.jit()` compilable. See `Supported Python features`_ for compilable python features, and `Supported Numpy",
"rotating_wave, rotating_wave_winding) time_fine += time_step_integration if use_rotating_frame: # Take out of rotating frame",
"&= a - ib\\\\\\\\ a, b &\\\\in \\\\mathbb{R} \\\\end{align*} Parameters: * **z** (:class:`numpy.complex128`)",
"0 \\\\end{pmatrix},& J_y &= \\\\frac{1}{2}\\\\begin{pmatrix} 0 & -i \\\\\\\\ i & 0 \\\\end{pmatrix},&",
"@jit_device_template(\"(float64[:], float64, complex128)\") def transform_frame_lab(field_sample, rotating_wave, rotating_wave_winding): return transform_frame = transform_frame_lab get_field_jit =",
"value, dimension, label): super().__init__() self._value_ = value self.dimension = dimension self.label = label",
"= 0 if use_rotating_frame: time_sample = time_coarse[time_index] + time_step_output/2 get_field_jit(time_sample, sweep_parameter, field_sample[0, :])",
"math.tau*time_step_integration*field_sample[1, 3]/2 append_exponentiation(field_sample[2, :], time_evolution_fine, time_evolution_coarse) get_field_integration = get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif",
"self.compile_time_evolver(get_field, spin_quantum_number, device, use_rotating_frame, integration_method, exponentiation_method, trotter_cutoff, threads_per_block, max_registers) except: print(\"\\033[31mspinsim error: numba",
"# @jit_device # def matrix_exponential_lie_trotter(field_sample, result, trotter_cutoff): # hyper_cube_amount = math.ceil(trotter_cutoff/2) # if",
"rotating_wave, rotating_wave_winding): time_sample = time_fine + 0.5*time_step_integration - time_coarse rotating_wave_winding[0] = math.cos(math.tau*rotating_wave*time_sample) +",
"(c_Xs_Y - is_Xc_Y) e^{-iZ} & (c_Xc_Y + is_Xs_Y) e^{iZ} \\\\end{pmatrix}^{2^\\\\tau}\\\\\\\\ &= T^{2^\\\\tau}, \\\\end{align*}",
"use in the integration. Defaults to :obj:`IntegrationMethod.MAGNUS_CF4`. See :obj:`IntegrationMethod` for more details. trotter_cutoff",
"time_evolution_coarse) get_field_integration = get_field_integration_midpoint append_exponentiation_integration = append_exponentiation_integration_midpoint @jit_device_template(\"(int64, float64[:], float64, float64, float64[:], complex128[:,",
"(:class:`numpy.ndarray` of :class:`numpy.complex128`, (y_index, x_index)) - A matrix to be filled with the",
": :obj:`callable` The inner (maths convention dot) product between two complex vectors. ..",
"ez = math.cos(ez) + 1j*math.sin(ez) # eq = field_sample[3]/(6*precision) # eq = math.cos(eq)",
"element of `time_coarse`. In units of s. Determines the sample rate of the",
":class:`numpy.complex128`, (y_index, x_index)) - The matrix to copy to. set_to_one(operator) : :obj:`callable` Make",
"units of s. Determines the sample rate of the outputs `time_coarse` and `time_evolution_coarse`.",
"+ operator[1, 1])*operator[1, 1] @jit_device def adjoint(operator, result): result[0, 0] = conj(operator[0, 0])",
"roc.device_array((state.shape[0], 3), np.float64) blocks_per_grid = (state.shape[0] + (threads_per_block - 1)) // threads_per_block get_spin[blocks_per_grid,",
"state[time_index, 1].real**2 - state[time_index, 1].imag**2) else: spin[time_index, 0] = (2*conj(state[time_index, 1])*(state[time_index, 0] +",
"\"\"\" class Device(Enum): \"\"\" The target device that the integrator is being compiled",
"left[0, 2]*right[2, 2] result[1, 2] = left[1, 0]*right[0, 2] + left[1, 1]*right[1, 2]",
"= math.sqrt(3) machine_epsilon = np.finfo(np.float64).eps*1000 class Utilities: \"\"\" A on object that contains",
"field at, in units of s. * **simulation_index** (:obj:`int`) - a parameter that",
"operator[2, 1] = 0 operator[0, 2] = 0 operator[1, 2] = 0 operator[2,",
"the time dependent Schroedinger equation and returns the quantum state of the spin",
"time_fine, time_coarse, time_step_integration, field_sample, rotating_wave, rotating_wave_winding): time_sample = time_fine - time_coarse rotating_wave_winding[0] =",
"1] = 1 operator[2, 1] = 0 operator[0, 2] = 0 operator[1, 2]",
"0] + operator[1, 2]*operator[2, 0] result[2, 0] = operator[2, 0]*operator[0, 0] + operator[2,",
"((time_fine + 0.5*time_step_integration*(1 + 1/sqrt3)) - time_coarse) rotating_wave_winding[1] = math.cos(math.tau*rotating_wave*time_sample) + 1j*math.sin(math.tau*rotating_wave*time_sample) time_sample",
"self.device == Device.ROC: time = roc.device_array(time_index_max, np.float64) time_evolution_coarse = roc.device_array((time_index_max, self.spin_quantum_number.dimension, self.spin_quantum_number.dimension), np.complex128)",
"device on construction of the object. Attributes ---------- conj(z) : :obj:`callable` Conjugate of",
"A text label that can be used for archiving. \"\"\" def __init__(self, value,",
"1] *= rotating_wave_winding[0] time_evolution_coarse[time_index, 2, 2] *= rotating_wave_winding[0] else: time_evolution_coarse[time_index, 1, 0] *=",
"IntegrationMethod(Enum): \"\"\" Options for describing which method is used during the integration. Parameters",
"using the rating wave approximation: just define `get_field()` with field functions that use",
"the experiment is to start and finishes. In units of s. time_step_integration :",
"state[time_index, 0].imag**2 - state[time_index, 2].real**2 - state[time_index, 2].imag**2 elif device_index > 0: if",
"= get_field_integration_half_step append_exponentiation_integration = append_exponentiation_integration_half_step elif integration_method == IntegrationMethod.MIDPOINT_SAMPLE: @jit_device_template(\"(float64, float64, float64, float64,",
"of a quantum state. Used to calculate `spin` the first time it is",
"q) J_q)))^{2^\\\\tau}\\\\\\\\ &= \\\\begin{pmatrix} \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)}(c_X + c_Y - i s_Xs_Y)}{2} &",
"exponentiator, if :obj:`ExponentiationMethod.LIE_TROTTER` is chosen. threads_per_block : :obj:`int` The size of each thread",
"1]*operator[1, 1] + (2 + operator[2, 2])*operator[2, 1] result[0, 2] = (2 +",
"s. time_step_integration : :obj:`float` The time step used within the integration algorithm. In",
"each coarse timestep in parallel time_index = cuda.grid(1) if time_index < time_coarse.size: get_time_evolution_loop(time_index,",
"\\\\infty} \\\\left(\\\\exp\\\\left(\\\\frac{1}{c}A\\\\right) \\\\exp\\\\left(\\\\frac{1}{c}B\\\\right)\\\\right)^c. **For spin half systems:** Assumes the exponent is an imaginary",
"for power_index in range(hyper_cube_amount): # matrix_multiply(result, result, temporary) # matrix_multiply(temporary, temporary, result) else:",
"= math.cos(a/2) Sa = -1j*math.sin(a/2) ez = field_sample[2]/(2*precision) ez = math.cos(ez) + 1j*math.sin(ez)",
"2) \"\"\" Use the :func:`numba.roc.jit()` LLVM compiler to compile the integrator to run",
"+ i s_Xs_Y)}{2} \\\\\\\\ \\\\frac{e^{-i\\\\left(Z + \\\\frac{Q}{3}\\\\right)} (-i s_X + c_X s_Y)}{\\\\sqrt{2}} &",
"1]*right[1, 1] + left[2, 2]*right[2, 1] result[0, 2] = left[0, 0]*right[0, 2] +",
"end of the time step. The equivalent of the trapezoidal method. \"\"\" class",
"X.real field_sample[1] = X.imag field_sample[2] = field_sample[2] - rotating_wave transform_frame = transform_frame_spin_one_rotating else:",
"to compile the integrator to run on an AMD ROCm compatible GPU, in",
"= cuda.local.array((dimension, dimension), dtype = np.complex128) field_sample = cuda.local.array((sample_index_max, lie_dimension), dtype = np.float64)",
"s. state_init : :obj:`numpy.ndarray` of :obj:`numpy.complex128` (magnetic_quantum_number) The initial quantum state of the",
"\"midpoint_sample\" \"\"\" Euler integration method. \"\"\" HALF_STEP = \"half_step\" \"\"\" Integration method from",
"- 1j*cy*sx)/sqrt2 # cisz = math.cos(z - q/3) + 1j*math.sin(z - q/3) #",
"== \"spin\": spin = self.spin_calculator(self.state) setattr(self, attr_name, spin) return self.spin raise AttributeError(\"{} has",
"state. Must be a whole number multiple of `time_step_integration`. Measured in s. state_init",
"= (1, 3, \"one\") \"\"\" For three level systems. \"\"\" class IntegrationMethod(Enum): \"\"\"",
"= value self.index = index if value == \"python\": def jit_host(template, max_registers): def",
"matrix exponential based on its analytic form. .. warning:: Only available for use"
] |
[
"esp: # # SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23,",
"try: import pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2 code, eg US, GB, DE,",
"# machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode 4 bit if",
"to /sd try: # Some boards have pulldown and/or LED on GPIO2, pullup",
"import machine import uos as os try: import esp esp.osdebug(None) except ImportError: esp",
"Alpha-2 code, eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act as a serial",
"SD = False if SD: # Mount SD to /sd try: # Some",
"pyb.country(\"US\") # ISO 3166-1 Alpha-2 code, eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") #",
"os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\") # SPI 1 bit M5Stack Core",
"sck=18, miso=19, mosi=23, cs=4), \"/sd\") # SPI 1 bit M5Stack Core print(\"SD Card",
"act as a serial and a storage device # pyb.main('main.py') # main script",
"boot (including wake-boot from deepsleep) import machine import uos as os try: import",
"# ISO 3166-1 Alpha-2 code, eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act",
"mosi=23, cs=4), \"/sd\") # SPI 1 bit M5Stack Core print(\"SD Card mounted\") except",
"and/or LED on GPIO2, pullup avoids issues on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN,",
"\"/sd\") # SD mode 4 bit if esp: # # SPI 1 bit",
"8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode 4",
"pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2 code, eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\")",
"# SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\")",
"executed on every boot (including wake-boot from deepsleep) import machine import uos as",
"GPIO2, pullup avoids issues on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1,",
"avoids issues on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\")",
"this one except ImportError: pass SD = False if SD: # Mount SD",
"esp = None try: import pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2 code, eg",
"machine import uos as os try: import esp esp.osdebug(None) except ImportError: esp =",
"# SD mode 4 bit if esp: # # SPI 1 bit M5Stack",
"as os try: import esp esp.osdebug(None) except ImportError: esp = None try: import",
"print(\"SD Card mounted\") except OSError as e: if e.args[0] == 16: print(\"No SD",
"mounted\") except OSError as e: if e.args[0] == 16: print(\"No SD Card found\")",
"Card mounted\") except OSError as e: if e.args[0] == 16: print(\"No SD Card",
"and a storage device # pyb.main('main.py') # main script to run after this",
"Core print(\"SD Card mounted\") except OSError as e: if e.args[0] == 16: print(\"No",
"pullup avoids issues on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4),",
"SPI 1 bit M5Stack Core print(\"SD Card mounted\") except OSError as e: if",
"if esp: # # SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19,",
"ImportError: esp = None try: import pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2 code,",
"3166-1 Alpha-2 code, eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act as a",
"bit if esp: # # SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18,",
"serial and a storage device # pyb.main('main.py') # main script to run after",
"uos as os try: import esp esp.osdebug(None) except ImportError: esp = None try:",
"pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode 4 bit if esp: #",
"esp.osdebug(None) except ImportError: esp = None try: import pyb pyb.country(\"US\") # ISO 3166-1",
"on GPIO2, pullup avoids issues on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) #",
"import uos as os try: import esp esp.osdebug(None) except ImportError: esp = None",
"SD to /sd try: # Some boards have pulldown and/or LED on GPIO2,",
"<filename>board/boot.py<gh_stars>1-10 # This file is executed on every boot (including wake-boot from deepsleep)",
"# pyb.main('main.py') # main script to run after this one except ImportError: pass",
"is executed on every boot (including wake-boot from deepsleep) import machine import uos",
"every boot (including wake-boot from deepsleep) import machine import uos as os try:",
"SD mode 4 bit if esp: # # SPI 1 bit M5Stack Core",
"issues on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") #",
"# main script to run after this one except ImportError: pass SD =",
"pyb.usb_mode(\"VCP+MSC\") # act as a serial and a storage device # pyb.main('main.py') #",
"deepsleep) import machine import uos as os try: import esp esp.osdebug(None) except ImportError:",
"import esp esp.osdebug(None) except ImportError: esp = None try: import pyb pyb.country(\"US\") #",
"import pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2 code, eg US, GB, DE, AU",
"except ImportError: pass SD = False if SD: # Mount SD to /sd",
"on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD",
"Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\") # SPI 1 bit M5Stack",
"storage device # pyb.main('main.py') # main script to run after this one except",
"1 bit M5Stack Core print(\"SD Card mounted\") except OSError as e: if e.args[0]",
"one except ImportError: pass SD = False if SD: # Mount SD to",
"1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\") # SPI",
"# os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode 4 bit if esp: # #",
"from deepsleep) import machine import uos as os try: import esp esp.osdebug(None) except",
"code, eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act as a serial and",
"width=4), \"/sd\") # SD mode 4 bit if esp: # # SPI 1",
"pulldown and/or LED on GPIO2, pullup avoids issues on TTGO 8 v1.8 #",
"This file is executed on every boot (including wake-boot from deepsleep) import machine",
"SD: # Mount SD to /sd try: # Some boards have pulldown and/or",
"to run after this one except ImportError: pass SD = False if SD:",
"# SPI 1 bit M5Stack Core print(\"SD Card mounted\") except OSError as e:",
"a storage device # pyb.main('main.py') # main script to run after this one",
"on every boot (including wake-boot from deepsleep) import machine import uos as os",
"main script to run after this one except ImportError: pass SD = False",
"# Mount SD to /sd try: # Some boards have pulldown and/or LED",
"try: # Some boards have pulldown and/or LED on GPIO2, pullup avoids issues",
"SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\") #",
"os try: import esp esp.osdebug(None) except ImportError: esp = None try: import pyb",
"machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode 4 bit if esp:",
"pass SD = False if SD: # Mount SD to /sd try: #",
"= False if SD: # Mount SD to /sd try: # Some boards",
"ISO 3166-1 Alpha-2 code, eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act as",
"False if SD: # Mount SD to /sd try: # Some boards have",
"esp esp.osdebug(None) except ImportError: esp = None try: import pyb pyb.country(\"US\") # ISO",
"bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\") # SPI 1",
"TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode",
"file is executed on every boot (including wake-boot from deepsleep) import machine import",
"# act as a serial and a storage device # pyb.main('main.py') # main",
"LED on GPIO2, pullup avoids issues on TTGO 8 v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP)",
"mode 4 bit if esp: # # SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2,",
"cs=4), \"/sd\") # SPI 1 bit M5Stack Core print(\"SD Card mounted\") except OSError",
"a serial and a storage device # pyb.main('main.py') # main script to run",
"ImportError: pass SD = False if SD: # Mount SD to /sd try:",
"after this one except ImportError: pass SD = False if SD: # Mount",
"try: import esp esp.osdebug(None) except ImportError: esp = None try: import pyb pyb.country(\"US\")",
"miso=19, mosi=23, cs=4), \"/sd\") # SPI 1 bit M5Stack Core print(\"SD Card mounted\")",
"AU pyb.usb_mode(\"VCP+MSC\") # act as a serial and a storage device # pyb.main('main.py')",
"# This file is executed on every boot (including wake-boot from deepsleep) import",
"GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act as a serial and a storage device",
"/sd try: # Some boards have pulldown and/or LED on GPIO2, pullup avoids",
"except ImportError: esp = None try: import pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2",
"boards have pulldown and/or LED on GPIO2, pullup avoids issues on TTGO 8",
"v1.8 # machine.Pin(2,mode=machine.Pin.IN, pull=machine.Pin.PULL_UP) # os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode 4 bit",
"os.mount(machine.SDCard(slot=1, width=4), \"/sd\") # SD mode 4 bit if esp: # # SPI",
"Mount SD to /sd try: # Some boards have pulldown and/or LED on",
"(including wake-boot from deepsleep) import machine import uos as os try: import esp",
"run after this one except ImportError: pass SD = False if SD: #",
"4 bit if esp: # # SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1,",
"None try: import pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2 code, eg US, GB,",
"M5Stack Core print(\"SD Card mounted\") except OSError as e: if e.args[0] == 16:",
"wake-boot from deepsleep) import machine import uos as os try: import esp esp.osdebug(None)",
"M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\") # SPI 1 bit",
"DE, AU pyb.usb_mode(\"VCP+MSC\") # act as a serial and a storage device #",
"have pulldown and/or LED on GPIO2, pullup avoids issues on TTGO 8 v1.8",
"pyb.main('main.py') # main script to run after this one except ImportError: pass SD",
"\"/sd\") # SPI 1 bit M5Stack Core print(\"SD Card mounted\") except OSError as",
"script to run after this one except ImportError: pass SD = False if",
"# Some boards have pulldown and/or LED on GPIO2, pullup avoids issues on",
"device # pyb.main('main.py') # main script to run after this one except ImportError:",
"as a serial and a storage device # pyb.main('main.py') # main script to",
"if SD: # Mount SD to /sd try: # Some boards have pulldown",
"= None try: import pyb pyb.country(\"US\") # ISO 3166-1 Alpha-2 code, eg US,",
"bit M5Stack Core print(\"SD Card mounted\") except OSError as e: if e.args[0] ==",
"width=1, sck=18, miso=19, mosi=23, cs=4), \"/sd\") # SPI 1 bit M5Stack Core print(\"SD",
"# # SPI 1 bit M5Stack Core os.mount(machine.SDCard(slot=2, width=1, sck=18, miso=19, mosi=23, cs=4),",
"US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act as a serial and a storage",
"Some boards have pulldown and/or LED on GPIO2, pullup avoids issues on TTGO",
"eg US, GB, DE, AU pyb.usb_mode(\"VCP+MSC\") # act as a serial and a"
] |
[
"ControlType.Withdraw: # delta 는 양의 정수만 입력된다고 가정 if control == ControlType.Withdraw: delta",
"int]: pass def finish(self) -> None: self.accounts = None self.pin = None self.card_number",
"self.code = code self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin:",
"ABCMeta, abstractmethod from enum import Enum from bank import Bank from cash_bin import",
"enum import Enum from bank import Bank from cash_bin import CashBin from card_reader",
"None self.card_number = None self.account = None class AtmController(AbstractAtmController): def input_pin(self, pin: str)",
"str]: pass @abstractmethod def control_account( self, control: ControlType, delta: int = 0 )",
"authentication(self, card_number: str, pin: str) -> Tuple[bool, str]: pass @abstractmethod def select_account(self, account:",
"= (False, \"실패(예외처리용)\", balance) else: result = (True, \"성공\", balance) else: result =",
"account: str) -> Tuple[bool, str]: if account in self.accounts: self.account = account return",
"ControlType.Deposit or control == ControlType.Withdraw: # delta 는 양의 정수만 입력된다고 가정 if",
"__future__ import annotations from typing import List, Dict, Tuple, Optional from abc import",
"self.bank.check_pin(self.card_number, self.pin) if self.accounts is None: self.finish() return (False, \"인증 실패.\") else: return",
"\"입금\") Withdraw = (2, \"출금\") def __init__(self, code: int, desc: str) -> None:",
"desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader) -> None:",
"class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader) -> None: self.bank",
"self.finish() return (False, \"카드번호 혹은 계좌가 존재하지 않습니다.\", -1) if control == ControlType.SeeBalance:",
"-> None: self.code = code self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank:",
"str) -> Tuple[bool, str]: pass @abstractmethod def select_account(self, account: str) -> Tuple[bool, str]:",
"or self.account is None: self.finish() return (False, \"카드번호 혹은 계좌가 존재하지 않습니다.\", -1)",
"ControlType(Enum): SeeBalance = (0, \"잔고 출력\") Deposit = (1, \"입금\") Withdraw = (2,",
"control == ControlType.Withdraw: delta = delta * -1 if self.cash_bin.get_balance() + delta <",
"str]: pass @abstractmethod def select_account(self, account: str) -> Tuple[bool, str]: pass @abstractmethod def",
"self.account is None: self.finish() return (False, \"카드번호 혹은 계좌가 존재하지 않습니다.\", -1) if",
"self.card_number = None self.account = None class AtmController(AbstractAtmController): def input_pin(self, pin: str) ->",
"pin: str) -> Tuple[bool, str]: pass @abstractmethod def select_account(self, account: str) -> Tuple[bool,",
"return (False, \"인증 실패.\") else: return (True, \"성공\") def select_account(self, account: str) ->",
"= None class AtmController(AbstractAtmController): def input_pin(self, pin: str) -> None: self.pin = pin",
"\"카드번호 혹은 계좌가 존재하지 않습니다.\", -1) if control == ControlType.SeeBalance: result = (True,",
"bank import Bank from cash_bin import CashBin from card_reader import CardReader class ControlType(Enum):",
"# delta 는 양의 정수만 입력된다고 가정 if control == ControlType.Withdraw: delta =",
"if self.accounts is None: self.finish() return (False, \"인증 실패.\") else: return (True, \"성공\")",
"Tuple[bool, str]: pass @abstractmethod def select_account(self, account: str) -> Tuple[bool, str]: pass @abstractmethod",
"account: str) -> Tuple[bool, str]: pass @abstractmethod def control_account( self, control: ControlType, delta:",
"None or self.account is None: self.finish() return (False, \"카드번호 혹은 계좌가 존재하지 않습니다.\",",
"계좌가 존재하지 않습니다.\", -1) if control == ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number,",
"= self.bank.control_balance(self.card_number, self.account, delta) if balance < 0: result = (False, \"실패(예외처리용)\", balance)",
"balance < 0: result = (False, \"실패(예외처리용)\", balance) else: result = (True, \"성공\",",
") -> Tuple[bool, str, int]: # 중복된 예외처리 존재, 개선 필요 # 카드",
"abstractmethod from enum import Enum from bank import Bank from cash_bin import CashBin",
"= None self.pin = None self.card_number = None self.account = None class AtmController(AbstractAtmController):",
"str) -> Tuple[bool, str]: pass @abstractmethod def control_account( self, control: ControlType, delta: int",
"< 0: self.finish() return (False, \"계좌에 잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account,",
"self.cash_bin = cash_bin self.card_reader = card_reader self.accounts = None self.pin = None self.card_number",
"code: int, desc: str) -> None: self.code = code self.desc = desc class",
"from card_reader import CardReader class ControlType(Enum): SeeBalance = (0, \"잔고 출력\") Deposit =",
"None: pass @abstractmethod def authentication(self, card_number: str, pin: str) -> Tuple[bool, str]: pass",
"채크함 if self.card_number is None or self.account is None: self.finish() return (False, \"카드번호",
"0: result = (False, \"실패(예외처리용)\", balance) else: result = (True, \"성공\", balance) else:",
"@abstractmethod def authentication(self, card_number: str, pin: str) -> Tuple[bool, str]: pass @abstractmethod def",
"= (2, \"출금\") def __init__(self, code: int, desc: str) -> None: self.code =",
"Optional from abc import ABCMeta, abstractmethod from enum import Enum from bank import",
"delta) if balance < 0: result = (False, \"실패(예외처리용)\", balance) else: result =",
"import CardReader class ControlType(Enum): SeeBalance = (0, \"잔고 출력\") Deposit = (1, \"입금\")",
"AtmController(AbstractAtmController): def input_pin(self, pin: str) -> None: self.pin = pin def authentication(self) ->",
"= account return (True, \"성공\") else: self.finish() return (False, \"존재하지 않는 계좌 입니다.\")",
"self, control: ControlType, delta: int = 0 ) -> Tuple[bool, str, int]: #",
"control == ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit",
"-> None: self.bank = bank self.cash_bin = cash_bin self.card_reader = card_reader self.accounts =",
"or control == ControlType.Withdraw: # delta 는 양의 정수만 입력된다고 가정 if control",
"control_account와 bank의 control_balnce에서 동시에 채크함 if self.card_number is None or self.account is None:",
"def input_pin(self, pin: str) -> None: self.pin = pin def authentication(self) -> Tuple[bool,",
"None self.pin = None self.card_number = None self.account = None class AtmController(AbstractAtmController): def",
"-> Tuple[bool, str, int]: # 중복된 예외처리 존재, 개선 필요 # 카드 번호,",
"(False, \"인증 실패.\") else: return (True, \"성공\") def select_account(self, account: str) -> Tuple[bool,",
"if balance < 0: result = (False, \"실패(예외처리용)\", balance) else: result = (True,",
"pass @abstractmethod def select_account(self, account: str) -> Tuple[bool, str]: pass @abstractmethod def control_account(",
"None class AtmController(AbstractAtmController): def input_pin(self, pin: str) -> None: self.pin = pin def",
"-> Tuple[bool, str]: pass @abstractmethod def select_account(self, account: str) -> Tuple[bool, str]: pass",
"@abstractmethod def input_pin(self, pin: str) -> None: pass @abstractmethod def authentication(self, card_number: str,",
"실패.\") else: return (True, \"성공\") def select_account(self, account: str) -> Tuple[bool, str]: if",
"card_number: str, pin: str) -> Tuple[bool, str]: pass @abstractmethod def select_account(self, account: str)",
"self.pin = None self.card_number = None self.account = None @abstractmethod def input_pin(self, pin:",
"(False, \"계좌에 잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account, delta) if balance <",
"번호, 계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함 if self.card_number is None",
"self.account = None class AtmController(AbstractAtmController): def input_pin(self, pin: str) -> None: self.pin =",
") -> Tuple[bool, str, int]: pass def finish(self) -> None: self.accounts = None",
"is None or self.account is None: self.finish() return (False, \"카드번호 혹은 계좌가 존재하지",
"card_reader import CardReader class ControlType(Enum): SeeBalance = (0, \"잔고 출력\") Deposit = (1,",
"def control_account( self, control: ControlType, delta: int = 0 ) -> Tuple[bool, str,",
"0 ) -> Tuple[bool, str, int]: # 중복된 예외처리 존재, 개선 필요 #",
"def select_account(self, account: str) -> Tuple[bool, str]: if account in self.accounts: self.account =",
"is None: self.finish() return (False, \"인증 실패.\") else: return (True, \"성공\") def select_account(self,",
"self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts is None: self.finish() return (False, \"인증 실패.\")",
"ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit or control",
"부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account, delta) if balance < 0: result =",
"Tuple[bool, str]: pass @abstractmethod def control_account( self, control: ControlType, delta: int = 0",
"-1) if control == ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control",
"abc import ABCMeta, abstractmethod from enum import Enum from bank import Bank from",
"from typing import List, Dict, Tuple, Optional from abc import ABCMeta, abstractmethod from",
"CardReader) -> None: self.bank = bank self.cash_bin = cash_bin self.card_reader = card_reader self.accounts",
"str, int]: # 중복된 예외처리 존재, 개선 필요 # 카드 번호, 계좌가 없을시",
"= None self.pin = None self.card_number = None self.account = None @abstractmethod def",
"= None self.account = None @abstractmethod def input_pin(self, pin: str) -> None: pass",
"필요 # 카드 번호, 계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함 if",
"return (False, \"존재하지 않는 계좌 입니다.\") def control_account( self, control: ControlType, delta: int",
"= bank self.cash_bin = cash_bin self.card_reader = card_reader self.accounts = None self.pin =",
"-> Tuple[bool, str, int]: pass def finish(self) -> None: self.accounts = None self.pin",
"bank의 control_balnce에서 동시에 채크함 if self.card_number is None or self.account is None: self.finish()",
"pass @abstractmethod def authentication(self, card_number: str, pin: str) -> Tuple[bool, str]: pass @abstractmethod",
"pass @abstractmethod def control_account( self, control: ControlType, delta: int = 0 ) ->",
"-> None: self.pin = pin def authentication(self) -> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number()",
"정수만 입력된다고 가정 if control == ControlType.Withdraw: delta = delta * -1 if",
"class ControlType(Enum): SeeBalance = (0, \"잔고 출력\") Deposit = (1, \"입금\") Withdraw =",
"-1) balance = self.bank.control_balance(self.card_number, self.account, delta) if balance < 0: result = (False,",
"self.finish() return (False, \"계좌에 잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account, delta) if",
"\"성공\") else: self.finish() return (False, \"존재하지 않는 계좌 입니다.\") def control_account( self, control:",
"== ControlType.Deposit or control == ControlType.Withdraw: # delta 는 양의 정수만 입력된다고 가정",
"@abstractmethod def control_account( self, control: ControlType, delta: int = 0 ) -> Tuple[bool,",
"typing import List, Dict, Tuple, Optional from abc import ABCMeta, abstractmethod from enum",
"부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) + delta < 0: self.finish() return (False, \"계좌에",
"None @abstractmethod def input_pin(self, pin: str) -> None: pass @abstractmethod def authentication(self, card_number:",
"없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함 if self.card_number is None or self.account",
"None: self.pin = pin def authentication(self) -> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts",
"= delta * -1 if self.cash_bin.get_balance() + delta < 0: self.finish() return (False,",
"= (1, \"입금\") Withdraw = (2, \"출금\") def __init__(self, code: int, desc: str)",
"self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit or control == ControlType.Withdraw: # delta 는",
"self.accounts: self.account = account return (True, \"성공\") else: self.finish() return (False, \"존재하지 않는",
"int]: # 중복된 예외처리 존재, 개선 필요 # 카드 번호, 계좌가 없을시 atmcontroller의",
"-> None: self.accounts = None self.pin = None self.card_number = None self.account =",
"None: self.finish() return (False, \"인증 실패.\") else: return (True, \"성공\") def select_account(self, account:",
"delta: int = 0 ) -> Tuple[bool, str, int]: pass def finish(self) ->",
"cash_bin: CashBin, card_reader: CardReader) -> None: self.bank = bank self.cash_bin = cash_bin self.card_reader",
"(False, \"실패(예외처리용)\", balance) else: result = (True, \"성공\", balance) else: result = (False,",
"else: result = (True, \"성공\", balance) else: result = (False, \"잘못된 제어구문입니다.\", -1)",
"= None self.card_number = None self.account = None @abstractmethod def input_pin(self, pin: str)",
"is None: self.finish() return (False, \"카드번호 혹은 계좌가 존재하지 않습니다.\", -1) if control",
"atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함 if self.card_number is None or self.account is",
"None self.card_number = None self.account = None @abstractmethod def input_pin(self, pin: str) ->",
"(False, \"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) + delta < 0: self.finish()",
"= (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit or control == ControlType.Withdraw:",
"card_reader self.accounts = None self.pin = None self.card_number = None self.account = None",
"Withdraw = (2, \"출금\") def __init__(self, code: int, desc: str) -> None: self.code",
"str) -> None: self.pin = pin def authentication(self) -> Tuple[bool, str]: self.card_number =",
"== ControlType.Withdraw: delta = delta * -1 if self.cash_bin.get_balance() + delta < 0:",
"self.account = account return (True, \"성공\") else: self.finish() return (False, \"존재하지 않는 계좌",
"\"성공\") def select_account(self, account: str) -> Tuple[bool, str]: if account in self.accounts: self.account",
"(1, \"입금\") Withdraw = (2, \"출금\") def __init__(self, code: int, desc: str) ->",
"result = (True, \"성공\", balance) else: result = (False, \"잘못된 제어구문입니다.\", -1) self.finish()",
"result = (False, \"실패(예외처리용)\", balance) else: result = (True, \"성공\", balance) else: result",
"\"존재하지 않는 계좌 입니다.\") def control_account( self, control: ControlType, delta: int = 0",
"__init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader) -> None: self.bank = bank self.cash_bin",
"self.bank.get_balance(self.card_number, self.account) + delta < 0: self.finish() return (False, \"계좌에 잔고가 부족합니다.\", -1)",
"from bank import Bank from cash_bin import CashBin from card_reader import CardReader class",
"List, Dict, Tuple, Optional from abc import ABCMeta, abstractmethod from enum import Enum",
"self.accounts is None: self.finish() return (False, \"인증 실패.\") else: return (True, \"성공\") def",
"@abstractmethod def select_account(self, account: str) -> Tuple[bool, str]: pass @abstractmethod def control_account( self,",
"self.pin) if self.accounts is None: self.finish() return (False, \"인증 실패.\") else: return (True,",
"from __future__ import annotations from typing import List, Dict, Tuple, Optional from abc",
"str) -> None: self.code = code self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self,",
"def finish(self) -> None: self.accounts = None self.pin = None self.card_number = None",
"str, pin: str) -> Tuple[bool, str]: pass @abstractmethod def select_account(self, account: str) ->",
"control == ControlType.Deposit or control == ControlType.Withdraw: # delta 는 양의 정수만 입력된다고",
"== ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit or",
"\"출금\") def __init__(self, code: int, desc: str) -> None: self.code = code self.desc",
"\"인증 실패.\") else: return (True, \"성공\") def select_account(self, account: str) -> Tuple[bool, str]:",
"desc: str) -> None: self.code = code self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def",
"input_pin(self, pin: str) -> None: self.pin = pin def authentication(self) -> Tuple[bool, str]:",
"존재, 개선 필요 # 카드 번호, 계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에",
"if control == ControlType.Withdraw: delta = delta * -1 if self.cash_bin.get_balance() + delta",
"self.bank.control_balance(self.card_number, self.account, delta) if balance < 0: result = (False, \"실패(예외처리용)\", balance) else:",
"import CashBin from card_reader import CardReader class ControlType(Enum): SeeBalance = (0, \"잔고 출력\")",
"(True, \"성공\") def select_account(self, account: str) -> Tuple[bool, str]: if account in self.accounts:",
"+ delta < 0: self.finish() return (False, \"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number,",
"import List, Dict, Tuple, Optional from abc import ABCMeta, abstractmethod from enum import",
"* -1 if self.cash_bin.get_balance() + delta < 0: self.finish() return (False, \"현금통에 현금이",
"balance = self.bank.control_balance(self.card_number, self.account, delta) if balance < 0: result = (False, \"실패(예외처리용)\",",
"import Bank from cash_bin import CashBin from card_reader import CardReader class ControlType(Enum): SeeBalance",
"= self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts is None: self.finish() return (False,",
"(True, \"성공\") else: self.finish() return (False, \"존재하지 않는 계좌 입니다.\") def control_account( self,",
"는 양의 정수만 입력된다고 가정 if control == ControlType.Withdraw: delta = delta *",
"self.account) + delta < 0: self.finish() return (False, \"계좌에 잔고가 부족합니다.\", -1) balance",
"잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account, delta) if balance < 0: result",
"ControlType, delta: int = 0 ) -> Tuple[bool, str, int]: # 중복된 예외처리",
"self.bank = bank self.cash_bin = cash_bin self.card_reader = card_reader self.accounts = None self.pin",
"self.cash_bin.get_balance() + delta < 0: self.finish() return (False, \"현금통에 현금이 부족합니다.\", -1) if",
"int = 0 ) -> Tuple[bool, str, int]: # 중복된 예외처리 존재, 개선",
"None self.account = None @abstractmethod def input_pin(self, pin: str) -> None: pass @abstractmethod",
"= desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader) ->",
"annotations from typing import List, Dict, Tuple, Optional from abc import ABCMeta, abstractmethod",
"str) -> None: pass @abstractmethod def authentication(self, card_number: str, pin: str) -> Tuple[bool,",
"카드 번호, 계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함 if self.card_number is",
"ControlType, delta: int = 0 ) -> Tuple[bool, str, int]: pass def finish(self)",
"= None self.account = None class AtmController(AbstractAtmController): def input_pin(self, pin: str) -> None:",
"import annotations from typing import List, Dict, Tuple, Optional from abc import ABCMeta,",
"__init__(self, code: int, desc: str) -> None: self.code = code self.desc = desc",
"str) -> Tuple[bool, str]: if account in self.accounts: self.account = account return (True,",
"def input_pin(self, pin: str) -> None: pass @abstractmethod def authentication(self, card_number: str, pin:",
"Dict, Tuple, Optional from abc import ABCMeta, abstractmethod from enum import Enum from",
"if self.cash_bin.get_balance() + delta < 0: self.finish() return (False, \"현금통에 현금이 부족합니다.\", -1)",
"(0, \"잔고 출력\") Deposit = (1, \"입금\") Withdraw = (2, \"출금\") def __init__(self,",
"self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts is None: self.finish() return (False, \"인증",
"self.accounts = None self.pin = None self.card_number = None self.account = None class",
"str]: self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts is None: self.finish()",
"입니다.\") def control_account( self, control: ControlType, delta: int = 0 ) -> Tuple[bool,",
"self.account)) elif control == ControlType.Deposit or control == ControlType.Withdraw: # delta 는 양의",
"control: ControlType, delta: int = 0 ) -> Tuple[bool, str, int]: # 중복된",
"return (True, \"성공\") def select_account(self, account: str) -> Tuple[bool, str]: if account in",
"self.finish() return (False, \"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) + delta <",
"= code self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin: CashBin,",
"계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함 if self.card_number is None or",
"ControlType.Withdraw: delta = delta * -1 if self.cash_bin.get_balance() + delta < 0: self.finish()",
"control == ControlType.Withdraw: # delta 는 양의 정수만 입력된다고 가정 if control ==",
"delta 는 양의 정수만 입력된다고 가정 if control == ControlType.Withdraw: delta = delta",
"현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) + delta < 0: self.finish() return (False,",
"혹은 계좌가 존재하지 않습니다.\", -1) if control == ControlType.SeeBalance: result = (True, \"성공\",",
"-> None: pass @abstractmethod def authentication(self, card_number: str, pin: str) -> Tuple[bool, str]:",
"def __init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader) -> None: self.bank = bank",
"Deposit = (1, \"입금\") Withdraw = (2, \"출금\") def __init__(self, code: int, desc:",
"None self.account = None class AtmController(AbstractAtmController): def input_pin(self, pin: str) -> None: self.pin",
"(True, \"성공\", balance) else: result = (False, \"잘못된 제어구문입니다.\", -1) self.finish() return result",
"elif control == ControlType.Deposit or control == ControlType.Withdraw: # delta 는 양의 정수만",
"-> Tuple[bool, str]: if account in self.accounts: self.account = account return (True, \"성공\")",
"Tuple[bool, str]: if account in self.accounts: self.account = account return (True, \"성공\") else:",
"select_account(self, account: str) -> Tuple[bool, str]: if account in self.accounts: self.account = account",
"def __init__(self, code: int, desc: str) -> None: self.code = code self.desc =",
"SeeBalance = (0, \"잔고 출력\") Deposit = (1, \"입금\") Withdraw = (2, \"출금\")",
"delta: int = 0 ) -> Tuple[bool, str, int]: # 중복된 예외처리 존재,",
"Tuple, Optional from abc import ABCMeta, abstractmethod from enum import Enum from bank",
"(False, \"존재하지 않는 계좌 입니다.\") def control_account( self, control: ControlType, delta: int =",
"delta < 0: self.finish() return (False, \"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account)",
"import Enum from bank import Bank from cash_bin import CashBin from card_reader import",
"0: self.finish() return (False, \"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) + delta",
"account in self.accounts: self.account = account return (True, \"성공\") else: self.finish() return (False,",
"-> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts is",
"self.account, delta) if balance < 0: result = (False, \"실패(예외처리용)\", balance) else: result",
"from enum import Enum from bank import Bank from cash_bin import CashBin from",
"Tuple[bool, str, int]: # 중복된 예외처리 존재, 개선 필요 # 카드 번호, 계좌가",
"= 0 ) -> Tuple[bool, str, int]: # 중복된 예외처리 존재, 개선 필요",
"(False, \"카드번호 혹은 계좌가 존재하지 않습니다.\", -1) if control == ControlType.SeeBalance: result =",
"= None @abstractmethod def input_pin(self, pin: str) -> None: pass @abstractmethod def authentication(self,",
"# 카드 번호, 계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함 if self.card_number",
"input_pin(self, pin: str) -> None: pass @abstractmethod def authentication(self, card_number: str, pin: str)",
"CashBin from card_reader import CardReader class ControlType(Enum): SeeBalance = (0, \"잔고 출력\") Deposit",
"bank: Bank, cash_bin: CashBin, card_reader: CardReader) -> None: self.bank = bank self.cash_bin =",
"control_account( self, control: ControlType, delta: int = 0 ) -> Tuple[bool, str, int]:",
"self.account = None @abstractmethod def input_pin(self, pin: str) -> None: pass @abstractmethod def",
"\"실패(예외처리용)\", balance) else: result = (True, \"성공\", balance) else: result = (False, \"잘못된",
"Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts is None:",
"if self.card_number is None or self.account is None: self.finish() return (False, \"카드번호 혹은",
"None: self.code = code self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank,",
"중복된 예외처리 존재, 개선 필요 # 카드 번호, 계좌가 없을시 atmcontroller의 control_account와 bank의",
"delta = delta * -1 if self.cash_bin.get_balance() + delta < 0: self.finish() return",
"None: self.accounts = None self.pin = None self.card_number = None self.account = None",
"None: self.finish() return (False, \"카드번호 혹은 계좌가 존재하지 않습니다.\", -1) if control ==",
"return (False, \"계좌에 잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account, delta) if balance",
"return (True, \"성공\") else: self.finish() return (False, \"존재하지 않는 계좌 입니다.\") def control_account(",
"if account in self.accounts: self.account = account return (True, \"성공\") else: self.finish() return",
"가정 if control == ControlType.Withdraw: delta = delta * -1 if self.cash_bin.get_balance() +",
"self.finish() return (False, \"존재하지 않는 계좌 입니다.\") def control_account( self, control: ControlType, delta:",
"from abc import ABCMeta, abstractmethod from enum import Enum from bank import Bank",
"Enum from bank import Bank from cash_bin import CashBin from card_reader import CardReader",
"pin: str) -> None: pass @abstractmethod def authentication(self, card_number: str, pin: str) ->",
"CashBin, card_reader: CardReader) -> None: self.bank = bank self.cash_bin = cash_bin self.card_reader =",
"입력된다고 가정 if control == ControlType.Withdraw: delta = delta * -1 if self.cash_bin.get_balance()",
"동시에 채크함 if self.card_number is None or self.account is None: self.finish() return (False,",
"control: ControlType, delta: int = 0 ) -> Tuple[bool, str, int]: pass def",
"예외처리 존재, 개선 필요 # 카드 번호, 계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서",
"return (False, \"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) + delta < 0:",
"from cash_bin import CashBin from card_reader import CardReader class ControlType(Enum): SeeBalance = (0,",
"\"성공\", self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit or control == ControlType.Withdraw: # delta",
"0: self.finish() return (False, \"계좌에 잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account, delta)",
"== ControlType.Withdraw: # delta 는 양의 정수만 입력된다고 가정 if control == ControlType.Withdraw:",
"def authentication(self) -> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if",
"if self.bank.get_balance(self.card_number, self.account) + delta < 0: self.finish() return (False, \"계좌에 잔고가 부족합니다.\",",
"import ABCMeta, abstractmethod from enum import Enum from bank import Bank from cash_bin",
"result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit or control ==",
"def authentication(self, card_number: str, pin: str) -> Tuple[bool, str]: pass @abstractmethod def select_account(self,",
"self.card_number is None or self.account is None: self.finish() return (False, \"카드번호 혹은 계좌가",
"< 0: result = (False, \"실패(예외처리용)\", balance) else: result = (True, \"성공\", balance)",
"Bank from cash_bin import CashBin from card_reader import CardReader class ControlType(Enum): SeeBalance =",
"= pin def authentication(self) -> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number,",
"+ delta < 0: self.finish() return (False, \"계좌에 잔고가 부족합니다.\", -1) balance =",
"-> Tuple[bool, str]: pass @abstractmethod def control_account( self, control: ControlType, delta: int =",
"cash_bin self.card_reader = card_reader self.accounts = None self.pin = None self.card_number = None",
"개선 필요 # 카드 번호, 계좌가 없을시 atmcontroller의 control_account와 bank의 control_balnce에서 동시에 채크함",
"self.pin = pin def authentication(self) -> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts =",
"bank self.cash_bin = cash_bin self.card_reader = card_reader self.accounts = None self.pin = None",
"pass def finish(self) -> None: self.accounts = None self.pin = None self.card_number =",
"양의 정수만 입력된다고 가정 if control == ControlType.Withdraw: delta = delta * -1",
"\"잔고 출력\") Deposit = (1, \"입금\") Withdraw = (2, \"출금\") def __init__(self, code:",
"계좌 입니다.\") def control_account( self, control: ControlType, delta: int = 0 ) ->",
"self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader)",
"< 0: self.finish() return (False, \"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) +",
"def select_account(self, account: str) -> Tuple[bool, str]: pass @abstractmethod def control_account( self, control:",
"class AtmController(AbstractAtmController): def input_pin(self, pin: str) -> None: self.pin = pin def authentication(self)",
"self.finish() return (False, \"인증 실패.\") else: return (True, \"성공\") def select_account(self, account: str)",
"None: self.bank = bank self.cash_bin = cash_bin self.card_reader = card_reader self.accounts = None",
"-1) if self.bank.get_balance(self.card_number, self.account) + delta < 0: self.finish() return (False, \"계좌에 잔고가",
"= 0 ) -> Tuple[bool, str, int]: pass def finish(self) -> None: self.accounts",
"delta < 0: self.finish() return (False, \"계좌에 잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number,",
"card_reader: CardReader) -> None: self.bank = bank self.cash_bin = cash_bin self.card_reader = card_reader",
"CardReader class ControlType(Enum): SeeBalance = (0, \"잔고 출력\") Deposit = (1, \"입금\") Withdraw",
"pin: str) -> None: self.pin = pin def authentication(self) -> Tuple[bool, str]: self.card_number",
"= None self.card_number = None self.account = None class AtmController(AbstractAtmController): def input_pin(self, pin:",
"(True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control == ControlType.Deposit or control == ControlType.Withdraw: #",
"Bank, cash_bin: CashBin, card_reader: CardReader) -> None: self.bank = bank self.cash_bin = cash_bin",
"None self.pin = None self.card_number = None self.account = None @abstractmethod def input_pin(self,",
"Tuple[bool, str, int]: pass def finish(self) -> None: self.accounts = None self.pin =",
"않습니다.\", -1) if control == ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif",
"self.card_number = None self.account = None @abstractmethod def input_pin(self, pin: str) -> None:",
"select_account(self, account: str) -> Tuple[bool, str]: pass @abstractmethod def control_account( self, control: ControlType,",
"# 중복된 예외처리 존재, 개선 필요 # 카드 번호, 계좌가 없을시 atmcontroller의 control_account와",
"account return (True, \"성공\") else: self.finish() return (False, \"존재하지 않는 계좌 입니다.\") def",
"balance) else: result = (True, \"성공\", balance) else: result = (False, \"잘못된 제어구문입니다.\",",
"else: return (True, \"성공\") def select_account(self, account: str) -> Tuple[bool, str]: if account",
"self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts is None: self.finish() return",
"= (0, \"잔고 출력\") Deposit = (1, \"입금\") Withdraw = (2, \"출금\") def",
"int, desc: str) -> None: self.code = code self.desc = desc class AbstractAtmController(metaclass=ABCMeta):",
"str, int]: pass def finish(self) -> None: self.accounts = None self.pin = None",
"\"현금통에 현금이 부족합니다.\", -1) if self.bank.get_balance(self.card_number, self.account) + delta < 0: self.finish() return",
"-1 if self.cash_bin.get_balance() + delta < 0: self.finish() return (False, \"현금통에 현금이 부족합니다.\",",
"str]: if account in self.accounts: self.account = account return (True, \"성공\") else: self.finish()",
"if control == ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account)) elif control ==",
"않는 계좌 입니다.\") def control_account( self, control: ControlType, delta: int = 0 )",
"self.card_reader = card_reader self.accounts = None self.pin = None self.card_number = None self.account",
"in self.accounts: self.account = account return (True, \"성공\") else: self.finish() return (False, \"존재하지",
"authentication(self) -> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin) if self.accounts",
"0 ) -> Tuple[bool, str, int]: pass def finish(self) -> None: self.accounts =",
"출력\") Deposit = (1, \"입금\") Withdraw = (2, \"출금\") def __init__(self, code: int,",
"cash_bin import CashBin from card_reader import CardReader class ControlType(Enum): SeeBalance = (0, \"잔고",
"delta * -1 if self.cash_bin.get_balance() + delta < 0: self.finish() return (False, \"현금통에",
"self, control: ControlType, delta: int = 0 ) -> Tuple[bool, str, int]: pass",
"\"계좌에 잔고가 부족합니다.\", -1) balance = self.bank.control_balance(self.card_number, self.account, delta) if balance < 0:",
"= self.bank.check_pin(self.card_number, self.pin) if self.accounts is None: self.finish() return (False, \"인증 실패.\") else:",
"= cash_bin self.card_reader = card_reader self.accounts = None self.pin = None self.card_number =",
"control_balnce에서 동시에 채크함 if self.card_number is None or self.account is None: self.finish() return",
"= (True, \"성공\", balance) else: result = (False, \"잘못된 제어구문입니다.\", -1) self.finish() return",
"AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin: CashBin, card_reader: CardReader) -> None: self.bank =",
"int = 0 ) -> Tuple[bool, str, int]: pass def finish(self) -> None:",
"return (False, \"카드번호 혹은 계좌가 존재하지 않습니다.\", -1) if control == ControlType.SeeBalance: result",
"else: self.finish() return (False, \"존재하지 않는 계좌 입니다.\") def control_account( self, control: ControlType,",
"(2, \"출금\") def __init__(self, code: int, desc: str) -> None: self.code = code",
"self.accounts = None self.pin = None self.card_number = None self.account = None @abstractmethod",
"pin def authentication(self) -> Tuple[bool, str]: self.card_number = self.card_reader.get_card_number() self.accounts = self.bank.check_pin(self.card_number, self.pin)",
"finish(self) -> None: self.accounts = None self.pin = None self.card_number = None self.account",
"존재하지 않습니다.\", -1) if control == ControlType.SeeBalance: result = (True, \"성공\", self.bank.get_balance(self.card_number, self.account))",
"code self.desc = desc class AbstractAtmController(metaclass=ABCMeta): def __init__(self, bank: Bank, cash_bin: CashBin, card_reader:",
"= card_reader self.accounts = None self.pin = None self.card_number = None self.account =",
"self.pin = None self.card_number = None self.account = None class AtmController(AbstractAtmController): def input_pin(self,"
] |
[
"Created on 2015-10-10 @author: Devuser ''' class ProjectFortestingList(object): def __init__(self,fullpart,isversion,fortestings): self.fullpart=fullpart self.isversion=isversion self.fortestings=fortestings",
"#coding=utf-8 ''' Created on 2015-10-10 @author: Devuser ''' class ProjectFortestingList(object): def __init__(self,fullpart,isversion,fortestings): self.fullpart=fullpart",
"''' Created on 2015-10-10 @author: Devuser ''' class ProjectFortestingList(object): def __init__(self,fullpart,isversion,fortestings): self.fullpart=fullpart self.isversion=isversion"
] |
[
"_, servers = role.rpartition(\"@\") if not as_user or not servers: continue if as_user",
"request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group is not",
"if not group: return O.error(msg=\"Group is not available\") for role in rm_roles: as_user,",
"add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first() if not",
"u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name,",
"if not Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs) == 1: return O.error(msg=\"The role",
"express permission of CloudRunner.io # *******************************************************/ import logging from pecan import expose, request",
"as_user and r.servers == servers] for r in roles: request.db.delete(r) request.db.commit() errs =",
"\"@\" r = Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE',",
"as_user or not servers: continue if as_user == \"*\": as_user = \"@\" roles",
"request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args): group",
"def groups(self, name=None, *args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role in roles]",
"r.as_user == as_user and r.servers == servers] for r in roles: request.db.delete(r) request.db.commit()",
"return O.error(msg=\"The following roles are not valid: %s\" % \", \".join(errs)) for role",
"== request.user.org).one() group = Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def",
"CloudRunner Server can not be copied and/or distributed # * without the express",
"role '%s' is not valid\" % errs[0]) else: return O.error(msg=\"The following roles are",
"*args, **kwargs): name = name or kwargs['name'] org = request.db.query(Org).filter( Org.name == request.user.org).one()",
"roles = [r for r in group.roles if r.as_user == as_user and r.servers",
"modify_group_roles(self, name, *args, **kwargs): name = name or kwargs['name'] add_roles = request.POST.getall('add') rm_roles",
"\"*\": as_user = \"@\" roles = [r for r in group.roles if r.as_user",
"'org_id'], rel=[('roles', 'roles', modifier)]) for u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json')",
"IntegrityError from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import Group, Org, Role from cloudrunner_server.api.policy.decorators",
"and/or distributed # * without the express permission of CloudRunner.io # *******************************************************/ import",
"vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>>",
"# * Proprietary and confidential # * This file is part of CloudRunner",
"as_user = \"@\" roles = [r for r in group.roles if r.as_user ==",
"from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import Group, Org,",
"cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import Group, Org, Role from cloudrunner_server.api.policy.decorators import check_policy",
"import Group, Org, Role from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import JsonOutput as",
"quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args, **kwargs): name = name",
"logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args): def modifier(roles):",
"role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs) == 1: return O.error(msg=\"The",
"if errs: if len(errs) == 1: return O.error(msg=\"The role '%s' is not valid\"",
"role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if",
"rel=[('roles', 'roles', modifier)]) for u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin')",
"role in roles] if name: group = Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id',",
"and r.servers == servers] for r in roles: request.db.delete(r) request.db.commit() errs = []",
"group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group not found\") request.db.delete(group)",
"return O.error(msg=\"The role '%s' is not valid\" % errs[0]) else: return O.error(msg=\"The following",
"name, *args, **kwargs): name = name or kwargs['name'] org = request.db.query(Org).filter( Org.name ==",
"as_user or not servers: continue if as_user == \"*\": as_user = \"@\" r",
"name).first() if not group: return O.error(msg=\"Group is not available\") for role in rm_roles:",
"template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args): group = Group.visible(request).filter(Group.name == name).first() if",
"servers] for r in roles: request.db.delete(r) request.db.commit() errs = [] for role in",
"errs = [] for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if",
"# * CloudRunner Server can not be copied and/or distributed # * without",
"for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user)",
"**kwargs): name = name or kwargs['name'] org = request.db.query(Org).filter( Org.name == request.user.org).one() group",
"tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> #",
"rm_group(self, name, *args): group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group",
"= \"@\" r = Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback()",
"# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* #",
"len(errs) == 1: return O.error(msg=\"The role '%s' is not valid\" % errs[0]) else:",
"\".join(errs)) for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user",
"confidential # * This file is part of CloudRunner Server. # * #",
"valid\" % errs[0]) else: return O.error(msg=\"The following roles are not valid: %s\" %",
"@expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers)",
"*args): group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group not found\")",
"in group.roles if r.as_user == as_user and r.servers == servers] for r in",
"* # * CloudRunner Server can not be copied and/or distributed # *",
"*******************************************************/ import logging from pecan import expose, request # noqa from sqlalchemy.exc import",
"utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C)",
"= \"@\" roles = [r for r in group.roles if r.as_user == as_user",
"if as_user == \"*\": as_user = \"@\" r = Role(as_user=as_user, servers=servers, group=group) try:",
"_, servers = role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs) ==",
"O.error(msg=\"The role '%s' is not valid\" % errs[0]) else: return O.error(msg=\"The following roles",
"Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete()",
"not valid\" % errs[0]) else: return O.error(msg=\"The following roles are not valid: %s\"",
"kwargs['name'] add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first() if",
"1: return O.error(msg=\"The role '%s' is not valid\" % errs[0]) else: return O.error(msg=\"The",
"in rm_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or not servers:",
"be copied and/or distributed # * without the express permission of CloudRunner.io #",
"is part of CloudRunner Server. # * # * CloudRunner Server can not",
"O LOG = logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None,",
"O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args, **kwargs): name =",
"of CloudRunner Server. # * # * CloudRunner Server can not be copied",
"Proprietary and confidential # * This file is part of CloudRunner Server. #",
"servers=role.servers) for role in roles] if name: group = Group.visible(request).filter(Group.name == name).first() return",
"for u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self,",
"return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else: groups = [u.serialize( skip=['id', 'org_id'],",
"add_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or not servers: continue",
"Role from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import JsonOutput as O LOG =",
"request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs): name = name",
"# *******************************************************/ import logging from pecan import expose, request # noqa from sqlalchemy.exc",
"== servers] for r in roles: request.db.delete(r) request.db.commit() errs = [] for role",
"valid: %s\" % \", \".join(errs)) for role in add_roles: as_user, _, servers =",
"# * # * Proprietary and confidential # * This file is part",
"name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else: groups = [u.serialize( skip=['id',",
"def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role in roles] if name: group =",
"request.POST.getall('add') rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first() if not group: return",
"CloudRunner.io # *******************************************************/ import logging from pecan import expose, request # noqa from",
"request.db.delete(r) request.db.commit() errs = [] for role in add_roles: as_user, _, servers =",
"= [r for r in group.roles if r.as_user == as_user and r.servers ==",
"group.roles if r.as_user == as_user and r.servers == servers] for r in roles:",
"org = request.db.query(Org).filter( Org.name == request.user.org).one() group = Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT',",
"* CloudRunner Server can not be copied and/or distributed # * without the",
"group = Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else:",
"else: groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for u in Group.visible(request).all()]",
"O.error(msg=\"Group is not available\") for role in rm_roles: as_user, _, servers = role.rpartition(\"@\")",
"or not servers: continue if as_user == \"*\": as_user = \"@\" r =",
"coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright",
"cloudrunner_server.api.util import JsonOutput as O LOG = logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin')",
"@groups.wrap_create() def add_group(self, name, *args, **kwargs): name = name or kwargs['name'] org =",
"# noqa from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import",
"import wrap_command from cloudrunner_server.api.model import Group, Org, Role from cloudrunner_server.api.policy.decorators import check_policy from",
"request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs): name =",
"and confidential # * This file is part of CloudRunner Server. # *",
"name or kwargs['name'] add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name ==",
"expose, request # noqa from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import wrap_command from",
"= role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs) == 1: return",
"add_group(self, name, *args, **kwargs): name = name or kwargs['name'] org = request.db.query(Org).filter( Org.name",
"check_policy from cloudrunner_server.api.util import JsonOutput as O LOG = logging.getLogger() class Groups(object): @expose('json',",
"for r in group.roles if r.as_user == as_user and r.servers == servers] for",
"= logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args): def",
"servers: continue if as_user == \"*\": as_user = \"@\" roles = [r for",
"-*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # *",
"for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or",
"# vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io",
"* without the express permission of CloudRunner.io # *******************************************************/ import logging from pecan",
"2013-2014 CloudRunner.io <<EMAIL>> # * # * Proprietary and confidential # * This",
"name: group = Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]))",
"Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args): def modifier(roles): return [dict(as_user=role.as_user,",
"= Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group not found\") request.db.delete(group) request.db.commit()",
"add_roles: as_user, _, servers = role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if errs: if",
"try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name,",
"Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs) == 1: return O.error(msg=\"The role '%s' is",
"<<EMAIL>> # * # * Proprietary and confidential # * This file is",
"from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import Group, Org, Role from cloudrunner_server.api.policy.decorators import",
"following roles are not valid: %s\" % \", \".join(errs)) for role in add_roles:",
"roles: request.db.delete(r) request.db.commit() errs = [] for role in add_roles: as_user, _, servers",
"= Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else: groups",
"[] for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not Role.is_valid(as_user):",
"import logging from pecan import expose, request # noqa from sqlalchemy.exc import IntegrityError",
"skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST',",
"class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args): def modifier(roles): return",
"= request.db.query(Org).filter( Org.name == request.user.org).one() group = Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json')",
"org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs): name",
"[r for r in group.roles if r.as_user == as_user and r.servers == servers]",
"@check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role",
"@check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args, **kwargs): name = name or kwargs['name'] org",
"not group: return O.error(msg=\"Group is not available\") for role in rm_roles: as_user, _,",
"name = name or kwargs['name'] add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove') group =",
"request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args): group = Group.visible(request).filter(Group.name ==",
"@check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs): name = name or kwargs['name'] add_roles",
"This file is part of CloudRunner Server. # * # * CloudRunner Server",
"== 1: return O.error(msg=\"The role '%s' is not valid\" % errs[0]) else: return",
"not available\") for role in rm_roles: as_user, _, servers = role.rpartition(\"@\") if not",
"@groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args): group = Group.visible(request).filter(Group.name == name).first()",
"or not servers: continue if as_user == \"*\": as_user = \"@\" roles =",
"'roles', modifier)]) for u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create()",
"import JsonOutput as O LOG = logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group)",
"errs[0]) else: return O.error(msg=\"The following roles are not valid: %s\" % \", \".join(errs))",
"group=group) try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self,",
"%s\" % \", \".join(errs)) for role in add_roles: as_user, _, servers = role.rpartition(\"@\")",
"if name: group = Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles',",
"name, *args): group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group not",
"in roles: request.db.delete(r) request.db.commit() errs = [] for role in add_roles: as_user, _,",
"== as_user and r.servers == servers] for r in roles: request.db.delete(r) request.db.commit() errs",
"not be copied and/or distributed # * without the express permission of CloudRunner.io",
"template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args, **kwargs): name = name or kwargs['name']",
"request.user.org).one() group = Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self,",
"name = name or kwargs['name'] org = request.db.query(Org).filter( Org.name == request.user.org).one() group =",
"for role in rm_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or",
"request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args):",
"# * # * CloudRunner Server can not be copied and/or distributed #",
"import expose, request # noqa from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import wrap_command",
"Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs):",
"# * without the express permission of CloudRunner.io # *******************************************************/ import logging from",
"Server. # * # * CloudRunner Server can not be copied and/or distributed",
"wrap_command from cloudrunner_server.api.model import Group, Org, Role from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util",
"**kwargs): name = name or kwargs['name'] add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove') group",
"@groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args, **kwargs): name = name or",
"noqa from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import Group,",
"= request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group is",
"\"@\" roles = [r for r in group.roles if r.as_user == as_user and",
"import check_policy from cloudrunner_server.api.util import JsonOutput as O LOG = logging.getLogger() class Groups(object):",
"-*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014",
"is not available\") for role in rm_roles: as_user, _, servers = role.rpartition(\"@\") if",
"(C) 2013-2014 CloudRunner.io <<EMAIL>> # * # * Proprietary and confidential # *",
"for role in roles] if name: group = Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize(",
"template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs): name = name or kwargs['name']",
"CloudRunner Server. # * # * CloudRunner Server can not be copied and/or",
"in roles] if name: group = Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id', 'org_id'],",
"== name).first() if not group: return O.error(msg=\"Group is not available\") for role in",
"in add_roles: as_user, _, servers = role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if errs:",
"group: return O.error(msg=\"Group is not available\") for role in rm_roles: as_user, _, servers",
"sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import Group, Org, Role",
"of CloudRunner.io # *******************************************************/ import logging from pecan import expose, request # noqa",
"pecan import expose, request # noqa from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import",
"r in group.roles if r.as_user == as_user and r.servers == servers] for r",
"def add_group(self, name, *args, **kwargs): name = name or kwargs['name'] org = request.db.query(Org).filter(",
"rel=[('roles', 'roles', modifier)])) else: groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for",
"logging from pecan import expose, request # noqa from sqlalchemy.exc import IntegrityError from",
"JsonOutput as O LOG = logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def",
"groups(self, name=None, *args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role in roles] if",
"distributed # * without the express permission of CloudRunner.io # *******************************************************/ import logging",
"= Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group is not available\") for",
"Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # * # * Proprietary and confidential #",
"name or kwargs['name'] org = request.db.query(Org).filter( Org.name == request.user.org).one() group = Group(name=name, org=org)",
"= name or kwargs['name'] org = request.db.query(Org).filter( Org.name == request.user.org).one() group = Group(name=name,",
"are not valid: %s\" % \", \".join(errs)) for role in add_roles: as_user, _,",
"# * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # * # * Proprietary and",
"or kwargs['name'] org = request.db.query(Org).filter( Org.name == request.user.org).one() group = Group(name=name, org=org) request.db.add(group)",
"group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group is not available\")",
"in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args,",
"name=None, *args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role in roles] if name:",
"file is part of CloudRunner Server. # * # * CloudRunner Server can",
"[u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups))",
"O.error(msg=\"The following roles are not valid: %s\" % \", \".join(errs)) for role in",
"part of CloudRunner Server. # * # * CloudRunner Server can not be",
"as_user, _, servers = role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs)",
"modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role in roles] if name: group = Group.visible(request).filter(Group.name",
"servers=servers, group=group) try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def",
"* # * Proprietary and confidential # * This file is part of",
"Server can not be copied and/or distributed # * without the express permission",
"\"*\": as_user = \"@\" r = Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit() except",
"def rm_group(self, name, *args): group = Group.visible(request).filter(Group.name == name).first() if not group: return",
"if r.as_user == as_user and r.servers == servers] for r in roles: request.db.delete(r)",
"* Proprietary and confidential # * This file is part of CloudRunner Server.",
"= [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for u in Group.visible(request).all()] return O._anon(groups=groups,",
"#!/usr/bin/python # -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # /*******************************************************",
"# /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # * # *",
"not servers: continue if as_user == \"*\": as_user = \"@\" roles = [r",
"not Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs) == 1: return O.error(msg=\"The role '%s'",
"role in rm_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or not",
"O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else: groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles',",
"*args, **kwargs): name = name or kwargs['name'] add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove')",
"cloudrunner_server.api.model import Group, Org, Role from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import JsonOutput",
"return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args, **kwargs): name",
"* Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # * # * Proprietary and confidential",
"cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import JsonOutput as O LOG = logging.getLogger() class",
"continue if as_user == \"*\": as_user = \"@\" roles = [r for r",
"request # noqa from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model",
"r.servers == servers] for r in roles: request.db.delete(r) request.db.commit() errs = [] for",
"@check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args): group = Group.visible(request).filter(Group.name == name).first() if not",
"or kwargs['name'] add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first()",
"if len(errs) == 1: return O.error(msg=\"The role '%s' is not valid\" % errs[0])",
"r in roles: request.db.delete(r) request.db.commit() errs = [] for role in add_roles: as_user,",
"= [] for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not",
"@groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs): name = name or",
"modifier)]) for u in Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def",
"roles are not valid: %s\" % \", \".join(errs)) for role in add_roles: as_user,",
"= role.rpartition(\"@\") if not as_user or not servers: continue if as_user == \"*\":",
"% errs[0]) else: return O.error(msg=\"The following roles are not valid: %s\" % \",",
"% \", \".join(errs)) for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if",
"without the express permission of CloudRunner.io # *******************************************************/ import logging from pecan import",
"skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else: groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles',",
"available\") for role in rm_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user",
"except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args): group =",
"= request.POST.getall('add') rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first() if not group:",
"'%s' is not valid\" % errs[0]) else: return O.error(msg=\"The following roles are not",
"as_user, _, servers = role.rpartition(\"@\") if not as_user or not servers: continue if",
"@groups.wrap_modify() def modify_group_roles(self, name, *args, **kwargs): name = name or kwargs['name'] add_roles =",
"Group.visible(request).all()] return O._anon(groups=groups, quota=dict(allowed=request.user.tier.groups)) @groups.when(method='POST', template='json') @check_policy('is_admin') @groups.wrap_create() def add_group(self, name, *args, **kwargs):",
"errs: if len(errs) == 1: return O.error(msg=\"The role '%s' is not valid\" %",
"can not be copied and/or distributed # * without the express permission of",
"\", \".join(errs)) for role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not",
"= Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name, *args,",
"LOG = logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args):",
"= name or kwargs['name'] add_roles = request.POST.getall('add') rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name",
"# * This file is part of CloudRunner Server. # * # *",
"/******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # * # * Proprietary",
"return [dict(as_user=role.as_user, servers=role.servers) for role in roles] if name: group = Group.visible(request).filter(Group.name ==",
"== \"*\": as_user = \"@\" roles = [r for r in group.roles if",
"Group, Org, Role from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import JsonOutput as O",
"as O LOG = logging.getLogger() class Groups(object): @expose('json', generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self,",
"else: return O.error(msg=\"The following roles are not valid: %s\" % \", \".join(errs)) for",
"kwargs['name'] org = request.db.query(Org).filter( Org.name == request.user.org).one() group = Group(name=name, org=org) request.db.add(group) request.db.commit()",
"rm_roles = request.POST.getall('remove') group = Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group",
"copied and/or distributed # * without the express permission of CloudRunner.io # *******************************************************/",
"@wrap_command(Group) def groups(self, name=None, *args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role in",
"generic=True) @check_policy('is_admin') @wrap_command(Group) def groups(self, name=None, *args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for",
"not valid: %s\" % \", \".join(errs)) for role in add_roles: as_user, _, servers",
"errs.append(as_user) if errs: if len(errs) == 1: return O.error(msg=\"The role '%s' is not",
"servers = role.rpartition(\"@\") if not as_user or not servers: continue if as_user ==",
"not servers: continue if as_user == \"*\": as_user = \"@\" r = Role(as_user=as_user,",
"servers = role.rpartition(\"@\") if not Role.is_valid(as_user): errs.append(as_user) if errs: if len(errs) == 1:",
"the express permission of CloudRunner.io # *******************************************************/ import logging from pecan import expose,",
"continue if as_user == \"*\": as_user = \"@\" r = Role(as_user=as_user, servers=servers, group=group)",
"== \"*\": as_user = \"@\" r = Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit()",
"request.db.commit() errs = [] for role in add_roles: as_user, _, servers = role.rpartition(\"@\")",
"* This file is part of CloudRunner Server. # * # * CloudRunner",
"== name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else: groups = [u.serialize(",
"CloudRunner.io <<EMAIL>> # * # * Proprietary and confidential # * This file",
"roles] if name: group = Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles',",
"role.rpartition(\"@\") if not as_user or not servers: continue if as_user == \"*\": as_user",
"from cloudrunner_server.api.model import Group, Org, Role from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import",
"if not as_user or not servers: continue if as_user == \"*\": as_user =",
"'org_id'], rel=[('roles', 'roles', modifier)])) else: groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])",
"Org, Role from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import JsonOutput as O LOG",
"def modify_group_roles(self, name, *args, **kwargs): name = name or kwargs['name'] add_roles = request.POST.getall('add')",
"as_user == \"*\": as_user = \"@\" r = Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r)",
"from pecan import expose, request # noqa from sqlalchemy.exc import IntegrityError from cloudrunner_server.api.decorators",
"r = Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json')",
"not as_user or not servers: continue if as_user == \"*\": as_user = \"@\"",
"for r in roles: request.db.delete(r) request.db.commit() errs = [] for role in add_roles:",
"as_user == \"*\": as_user = \"@\" roles = [r for r in group.roles",
"groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for u in Group.visible(request).all()] return",
"permission of CloudRunner.io # *******************************************************/ import logging from pecan import expose, request #",
"group = Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify() def modify_group_roles(self, name,",
"rm_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or not servers: continue",
"'roles', modifier)])) else: groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for u",
"request.db.query(Org).filter( Org.name == request.user.org).one() group = Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin')",
"as_user = \"@\" r = Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit() except IntegrityError:",
"from cloudrunner_server.api.policy.decorators import check_policy from cloudrunner_server.api.util import JsonOutput as O LOG = logging.getLogger()",
"IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin') @groups.wrap_delete() def rm_group(self, name, *args): group = Group.visible(request).filter(Group.name",
"@groups.wrap_delete() def rm_group(self, name, *args): group = Group.visible(request).filter(Group.name == name).first() if not group:",
"Org.name == request.user.org).one() group = Group(name=name, org=org) request.db.add(group) request.db.commit() @groups.when(method='PUT', template='json') @check_policy('is_admin') @groups.wrap_modify()",
"modifier)])) else: groups = [u.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)]) for u in",
"= Role(as_user=as_user, servers=servers, group=group) try: request.db.add(r) request.db.commit() except IntegrityError: request.db.rollback() @groups.when(method='DELETE', template='json') @check_policy('is_admin')",
"from cloudrunner_server.api.util import JsonOutput as O LOG = logging.getLogger() class Groups(object): @expose('json', generic=True)",
"if as_user == \"*\": as_user = \"@\" roles = [r for r in",
"Group.visible(request).filter(Group.name == name).first() return O.group(group.serialize( skip=['id', 'org_id'], rel=[('roles', 'roles', modifier)])) else: groups =",
"softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # * #",
"servers: continue if as_user == \"*\": as_user = \"@\" r = Role(as_user=as_user, servers=servers,",
"[dict(as_user=role.as_user, servers=role.servers) for role in roles] if name: group = Group.visible(request).filter(Group.name == name).first()",
"Group.visible(request).filter(Group.name == name).first() if not group: return O.error(msg=\"Group is not available\") for role",
"role in add_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or not",
"shiftwidth=4 softtabstop=4 # /******************************************************* # * Copyright (C) 2013-2014 CloudRunner.io <<EMAIL>> # *",
"*args): def modifier(roles): return [dict(as_user=role.as_user, servers=role.servers) for role in roles] if name: group",
"is not valid\" % errs[0]) else: return O.error(msg=\"The following roles are not valid:",
"in add_roles: as_user, _, servers = role.rpartition(\"@\") if not as_user or not servers:",
"name, *args, **kwargs): name = name or kwargs['name'] add_roles = request.POST.getall('add') rm_roles =",
"return O.error(msg=\"Group is not available\") for role in rm_roles: as_user, _, servers =",
"import IntegrityError from cloudrunner_server.api.decorators import wrap_command from cloudrunner_server.api.model import Group, Org, Role from"
"logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs)",
"= re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False): if match := self.WILDCARD_RE.search(src): regex =",
"chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False): try: if in_memory: bytes_fp = io.BytesIO()",
"import contextmanager import requests logger = logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception): def",
"import fnmatch import io import mimetypes import os import pathlib import re import",
"# break yield file_path, dest_path def download_file(self, url, dest): with open(dest, 'wb') as",
"response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201: return file_name, f'{file_uri}', response.status_code,",
"like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs = [] for pattern in patterns: if",
"= args['depth'] params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc",
"requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code == 404: response = requests.post(self.db_uri, json=doc, auth=self.auth)",
"**kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException):",
"== 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201: return file_name,",
"valid URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE",
"self.URI_RE.match(uri): (ssl, _, userid, psswd, host, _, port, db) = match.groups() scheme =",
"{\\n if (doc._attachments) {\\n for (const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n}",
"in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\", \"reduce\": \"_stats\" }",
"uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment varialble {key}') scheme, userid,",
"os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath, dirs, files) in os.walk(src): for",
"rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason",
"'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code,",
"self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid",
"None self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth)",
"else: raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs = []",
"params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc in response.json()['rows']:",
"dst in self.download_srcdst(src, dst): if dry_run: yield src, dst, 'DRY RUN', response else:",
"(dirpath, dirs, files) in os.walk(src): for filename in files: file_path = os.path.join(dirpath, filename)",
"dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files,",
"response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth)",
"'wb') as f: return self.download_to_file(url, f) def download_to_file(self, url, file_obj): with requests.get(url, stream=True,",
"regex = re.compile(fnmatch.translate(src)) is_copying_files = True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src)",
"def download_file(self, url, dest): with open(dest, 'wb') as f: return self.download_to_file(url, f) def",
"\"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return {host, db, auth, passwd}: \"\"\"",
"response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self, src, dst, dry_run=False): for src, dst in",
"cls() logger('checking the db') if not client.check_db(): logger('creating the db') client.create_db() _id =",
"userid, psswd, host, port, db = self.parse_connection_uri(uri) if userid and psswd: self.auth =",
"200: rev = response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers,",
"response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code == 404: response = requests.post(self.db_uri,",
"RUN', response else: uri = f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield uri, dst,",
"= logger.info class CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException):",
"doc['value'] def download(self, src, dst, dry_run=False): for src, dst in self.download_srcdst(src, dst): if",
"self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files = True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex =",
"db else: raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs =",
"def init_db(cls, logger=echo): echo('connecting to couchdb') client = cls() logger('checking the db') if",
"import os import pathlib import re import tempfile from contextlib import contextmanager import",
"in args: params['group_level'] = args['depth'] params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth)",
"files: file_path = os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst,",
"if ssl else '') port = f':{port}' if port else '' return scheme,",
"if regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path",
"bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp)",
"= f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404: response = requests.post(f'{self.db_uri}',",
"not client.check_db(): logger('creating the db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the",
"yield '/'.join(doc['key']), doc['value'] def download(self, src, dst, dry_run=False): for src, dst in self.download_srcdst(src,",
"_ in self.ls(): if regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:]",
"if not client.check_db(): logger('creating the db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating",
"response.reason rev = response.json()['rev'] else: rev = response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers",
"couchdb') client = cls() logger('checking the db') if not client.check_db(): logger('creating the db')",
"= re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in self.run_view(): if not regexs or any([regex.search(file_path)",
"class URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is",
"'/'.join(doc['key']), doc['value'] def download(self, src, dst, dry_run=False): for src, dst in self.download_srcdst(src, dst):",
"requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param",
"url, dest): with open(dest, 'wb') as f: return self.download_to_file(url, f) def download_to_file(self, url,",
"rev = response.json()['rev'] else: rev = response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers =",
"'rb') finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url, stream=True,",
"= requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason @classmethod def",
"= re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if",
"bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb') finally: if",
"+ ('s' if ssl else '') port = f':{port}' if port else ''",
"fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb') finally: if in_memory: bytes_fp.close()",
"dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def",
"dest_path, 'DRY RUN', '' else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst): with",
"setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\": \"function (doc) {\\n if",
"match.groups() scheme = 'http' + ('s' if ssl else '') port = f':{port}'",
"\"\"\" if match := self.URI_RE.match(uri): (ssl, _, userid, psswd, host, _, port, db)",
"src, dst, dry_run=False): src = os.path.abspath(src) if os.path.isfile(src): if dry_run: yield src, dst,",
"can set environment varialble {key}') scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri)",
"src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self, src, dst): \"\"\" Uploads",
"os.path.join(dst, pp) if dry_run: yield file_path, dest_path, 'DRY RUN', '' else: yield self.upload_file(file_path,",
"-> http://127.0.0.1:5984/ :param uri: :return {host, db, auth, passwd}: \"\"\" if match :=",
"http://127.0.0.1:5984/ :param uri: :return {host, db, auth, passwd}: \"\"\" if match := self.URI_RE.match(uri):",
"= re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files = False for file_path, _ in self.ls():",
"dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):]) # break yield file_path, dest_path def download_file(self,",
"raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs = [] for",
"in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content def upload(self,",
"for file_path, file_size in self.run_view(): if not regexs or any([regex.search(file_path) for regex in",
"class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self,",
"_, userid, psswd, host, _, port, db) = match.groups() scheme = 'http' +",
"BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE =",
"return response.status_code == 200 def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self,",
"logger('checking the db') if not client.check_db(): logger('creating the db') client.create_db() _id = client.COUCHFS_VIEWS['_id']",
"file_path = os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst, pp)",
"WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False): if match := self.WILDCARD_RE.search(src): regex",
"passwd}: \"\"\" if match := self.URI_RE.match(uri): (ssl, _, userid, psswd, host, _, port,",
"URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\"",
"re import tempfile from contextlib import contextmanager import requests logger = logging.getLogger(__file__) echo",
"{key}') scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri) if userid and psswd:",
"response.raise_for_status() def save_doc(self, doc): _id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri,",
"list_attachments(self, *patterns): regexs = [] for pattern in patterns: if self.WILDCARD_RE.search(pattern): regex =",
"dst, file_path[len(src):]) # break yield file_path, dest_path def download_file(self, url, dest): with open(dest,",
"if match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files = True else: regex =",
"key = self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment varialble {key}') scheme, userid, psswd,",
"src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self, src, dst): \"\"\"",
"__init__(self, uri=None): if uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri: key",
"api for couchdb attachments \"\"\" \"\"\"Main module.\"\"\" import logging import fnmatch import io",
"if response.status_code == 200: rev = response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response =",
"= f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield uri, dst, response.status_code, response.reason WILDCARD_RE =",
"auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls, logger=echo): echo('connecting to",
"open(dest, 'wb') as f: return self.download_to_file(url, f) def download_to_file(self, url, file_obj): with requests.get(url,",
"return self.upload_file(src_fp, dst) def upload_file(self, src, dst): \"\"\" Uploads a file using dst",
"in files: file_path = os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) + 1:] dest_path =",
"def upload_file(self, src, dst): \"\"\" Uploads a file using dst as the doc/bucket",
"files) in os.walk(src): for filename in files: file_path = os.path.join(dirpath, filename) pp =",
"in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False): try: if in_memory:",
"response.json()['rev'] else: rev = response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}',",
"the doc/bucket id :param src: path to file to upload :param dst: id",
"else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files = False for file_path, _",
"= {'reduce': False, 'include_docs': False} if 'depth' in args: params['group_level'] = args['depth'] params['reduce']",
"regex in regexs]): yield file_path, file_size def run_view(self, **args): params = {'reduce': False,",
"requests.get(uri, auth=self.auth) yield uri, dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src,",
"RUN', '' else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as",
"auth=self.auth) if response.status_code != 201: return file_name, f'{file_uri}', response.status_code, response.reason rev = response.json()['rev']",
"class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE",
"f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}',",
"download_file(self, url, dest): with open(dest, 'wb') as f: return self.download_to_file(url, f) def download_to_file(self,",
"dst as the doc/bucket id :param src: path to file to upload :param",
"args: params['group_level'] = args['depth'] params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status()",
"file_obj): with requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if",
"set environment varialble {key}') scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri) if",
"dest_path = os.path.join(dst, pp) if dry_run: yield file_path, dest_path, 'DRY RUN', '' else:",
"def get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content def upload(self, src, dst, dry_run=False): src",
"= True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc in response.json()['rows']: yield",
"else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in self.run_view(): if not regexs",
"self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath, dirs, files) in",
"upload status, upload message \"\"\" doc_id = [segment for segment in dst.split('/') if",
"auth=self.auth) return response.status_code == 200 def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def",
"def list_attachments(self, *patterns): regexs = [] for pattern in patterns: if self.WILDCARD_RE.search(pattern): regex",
"requests logger = logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception): def __init__(self, *args, **kwargs):",
"dry_run: yield src, dst, 'DRY RUN', '' else: with open(src, 'rb') as src_fp:",
"emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\", \"reduce\": \"_stats\" } }, \"language\": \"javascript\"",
"f'{file_uri}', response.status_code, response.reason rev = response.json()['rev'] else: rev = response.headers['ETag'] major, _ =",
"client api for couchdb attachments \"\"\" \"\"\"Main module.\"\"\" import logging import fnmatch import",
"response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return {host,",
"dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if",
"auth=self.auth) as r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def",
"get_attachment(self, url, in_memory=False): try: if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue()",
"not uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment varialble {key}') scheme,",
"'http' + ('s' if ssl else '') port = f':{port}' if port else",
"f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield uri, dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+')",
"dst, dry_run=False): for src, dst in self.download_srcdst(src, dst): if dry_run: yield src, dst,",
"def upload(self, src, dst, dry_run=False): src = os.path.abspath(src) if os.path.isfile(src): if dry_run: yield",
"f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if response.status_code == 200: rev = response.headers['ETag'] headers",
"f':{port}' if port else '' return scheme, userid, psswd, host, port, db else:",
"dst) def upload_file(self, src, dst): \"\"\" Uploads a file using dst as the",
":param uri: :return {host, db, auth, passwd}: \"\"\" if match := self.URI_RE.match(uri): (ssl,",
"doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\", \"reduce\": \"_stats\" } },",
"def get_attachment(self, url, in_memory=False): try: if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield",
"host, port, db = self.parse_connection_uri(uri) if userid and psswd: self.auth = (userid, psswd)",
"self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst)",
"response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value']",
"= cls() logger('checking the db') if not client.check_db(): logger('creating the db') client.create_db() _id",
"a file using dst as the doc/bucket id :param src: path to file",
"auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return",
"f) def download_to_file(self, url, file_obj): with requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status() for",
"doc): _id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if response.status_code",
"[segment for segment in dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}'",
"if dry_run: yield src, dst, 'DRY RUN', response else: uri = f'{self.db_uri}/{src}' response",
"re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not",
"self.download_to_file(url, f) def download_to_file(self, url, file_obj): with requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status()",
"= response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response",
"in os.walk(src): for filename in files: file_path = os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix())",
"doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if response.status_code == 200: rev",
"scheme, userid, psswd, host, port, db else: raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}')",
"port, db = self.parse_connection_uri(uri) if userid and psswd: self.auth = (userid, psswd) else:",
"file_url, upload status, upload message \"\"\" doc_id = [segment for segment in dst.split('/')",
"requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201: return file_name, f'{file_uri}', response.status_code, response.reason rev",
"self.run_view(): if not regexs or any([regex.search(file_path) for regex in regexs]): yield file_path, file_size",
"response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' ->",
"message \"\"\" doc_id = [segment for segment in dst.split('/') if segment][0] file_name =",
"db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\":",
"= os.path.join(dst, pp) if dry_run: yield file_path, dest_path, 'DRY RUN', '' else: yield",
"db, auth, passwd}: \"\"\" if match := self.URI_RE.match(uri): (ssl, _, userid, psswd, host,",
"db) = match.groups() scheme = 'http' + ('s' if ssl else '') port",
"os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content def upload(self, src, dst, dry_run=False):",
"bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content def upload(self, src,",
"response.status_code == 200 def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc):",
"rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code == 404: response =",
"f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200 def create_db(self):",
"re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in self.run_view(): if not regexs or any([regex.search(file_path) for",
"headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls, logger=echo): echo('connecting",
"'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri is",
"response.raise_for_status() for doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self, src, dst, dry_run=False):",
"client.check_db(): logger('creating the db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db",
"stream=True, auth=self.auth).content def upload(self, src, dst, dry_run=False): src = os.path.abspath(src) if os.path.isfile(src): if",
"with requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if chunk:",
"__init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\"",
"using dst as the doc/bucket id :param src: path to file to upload",
"response else: uri = f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield uri, dst, response.status_code,",
"re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in self.run_view(): if not",
"= file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path = os.path.join(dst, dst_file_path) else: dst_file_path",
"dest_path) def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes)",
"upload_file(self, src, dst): \"\"\" Uploads a file using dst as the doc/bucket id",
"201: return file_name, f'{file_uri}', response.status_code, response.reason rev = response.json()['rev'] else: rev = response.headers['ETag']",
"json=doc, headers=headers, auth=self.auth) elif response.status_code == 404: response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status()",
"= file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if not",
"= (userid, psswd) else: self.auth = None self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}'",
"= match.groups() scheme = 'http' + ('s' if ssl else '') port =",
"is_copying_files, dst, file_path[len(src):]) # break yield file_path, dest_path def download_file(self, url, dest): with",
"= os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path",
"psswd) else: self.auth = None self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self):",
"if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False): try: if in_memory: bytes_fp =",
"else: dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:])",
"= os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst, pp) if",
"dry_run: yield src, dst, 'DRY RUN', response else: uri = f'{self.db_uri}/{src}' response =",
"params=params, auth=self.auth) response.raise_for_status() for doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self, src,",
"for src, dst in self.download_srcdst(src, dst): if dry_run: yield src, dst, 'DRY RUN',",
"= requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201: return file_name, f'{file_uri}', response.status_code, response.reason",
"return scheme, userid, psswd, host, port, db else: raise BadConnectionURI(f'use a connections like",
"= self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment varialble {key}') scheme, userid, psswd, host,",
"args['depth'] params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc in",
"any([regex.search(file_path) for regex in regexs]): yield file_path, file_size def run_view(self, **args): params =",
"for doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self, src, dst, dry_run=False): for",
"False, 'include_docs': False} if 'depth' in args: params['group_level'] = args['depth'] params['reduce'] = True",
"= requests.get(uri, auth=self.auth) yield uri, dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self,",
"response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code",
"os.environ.get(self.URI_ENVIRON_KEY) if not uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment varialble",
"_id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if response.status_code ==",
"src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath, dirs,",
"'depth' in args: params['group_level'] = args['depth'] params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params,",
"client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\":",
"scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri) if userid and psswd: self.auth",
"self.ls(): if regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'):",
"= doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if response.status_code == 200:",
":= self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files = True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex",
"dst): if dry_run: yield src, dst, 'DRY RUN', response else: uri = f'{self.db_uri}/{src}'",
"file_path[len(src):]) # break yield file_path, dest_path def download_file(self, url, dest): with open(dest, 'wb')",
"src, dst, dry_run=False): if match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files = True",
"re.compile(src) is_copying_files = False for file_path, _ in self.ls(): if regex.search(file_path): if is_copying_files:",
"in self.run_view(): if not regexs or any([regex.search(file_path) for regex in regexs]): yield file_path,",
"dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path = os.path.join(dst, dst_file_path) else:",
"download_srcdst(self, src, dst, dry_run=False): if match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files =",
"class CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid",
"self.download_srcdst(src, dst): if dry_run: yield src, dst, 'DRY RUN', response else: uri =",
"if userid and psswd: self.auth = (userid, psswd) else: self.auth = None self.db",
"auth=self.auth) yield uri, dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst,",
"404: response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test'",
"file using dst as the doc/bucket id :param src: path to file to",
"re.compile(fnmatch.translate(src)) is_copying_files = True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files =",
"yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name =",
"{\\n for (const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'),",
"def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return",
"doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self, src, dst, dry_run=False): for src,",
"= False for file_path, _ in self.ls(): if regex.search(file_path): if is_copying_files: match =",
"= f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200 def",
":return: file_name, file_url, upload status, upload message \"\"\" doc_id = [segment for segment",
"== 200 def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id",
"if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response =",
"dst): \"\"\" Uploads a file using dst as the doc/bucket id :param src:",
"doc_id = [segment for segment in dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri",
"id :param src: path to file to upload :param dst: id :return: file_name,",
"dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path = os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):] if",
"if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):]) # break yield file_path, dest_path",
"yield file_path, dest_path def download_file(self, url, dest): with open(dest, 'wb') as f: return",
"for (dirpath, dirs, files) in os.walk(src): for filename in files: file_path = os.path.join(dirpath,",
"= re.compile(fnmatch.translate(src)) is_copying_files = True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files",
"RUN', '' else: with open(src, 'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif",
"**kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL",
"True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc in response.json()['rows']: yield '/'.join(doc['key']),",
"== 404: response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given:",
"os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath, dirs, files) in os.walk(src):",
"the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": {",
"check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200 def create_db(self): response =",
"return file_name, f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls, logger=echo): echo('connecting to couchdb') client",
"as the doc/bucket id :param src: path to file to upload :param dst:",
"= response.json()['rev'] else: rev = response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers = {'Content-type':",
"echo = logger.info class CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class",
"auth=self.auth) response.raise_for_status() for doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self, src, dst,",
"CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None):",
"(doc) {\\n if (doc._attachments) {\\n for (const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n",
"= requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if",
"def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id = doc['_id']",
"1:] dest_path = os.path.join(dst, pp) if dry_run: yield file_path, dest_path, 'DRY RUN', ''",
"in regexs]): yield file_path, file_size def run_view(self, **args): params = {'reduce': False, 'include_docs':",
"(const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\", \"reduce\":",
"{\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\", \"reduce\": \"_stats\" } }, \"language\":",
"logging import fnmatch import io import mimetypes import os import pathlib import re",
"dst_file_path = file_path[1:] dest_path = os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):] if file_path.startswith('/'):",
"src, dst): \"\"\" Uploads a file using dst as the doc/bucket id :param",
"def download(self, src, dst, dry_run=False): for src, dst in self.download_srcdst(src, dst): if dry_run:",
"required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def",
"dst, 'DRY RUN', '' else: with open(src, 'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst,",
"= self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path = os.path.join(dst,",
"Uploads a file using dst as the doc/bucket id :param src: path to",
"os import pathlib import re import tempfile from contextlib import contextmanager import requests",
"True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files = False for file_path,",
"db = self.parse_connection_uri(uri) if userid and psswd: self.auth = (userid, psswd) else: self.auth",
"== 200: rev = response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc,",
"match := self.URI_RE.match(uri): (ssl, _, userid, psswd, host, _, port, db) = match.groups()",
"else '') port = f':{port}' if port else '' return scheme, userid, psswd,",
"doc/bucket id :param src: path to file to upload :param dst: id :return:",
"chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False): try: if",
"json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri:",
"= client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup')",
"headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status()",
"os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path =",
"= 'http' + ('s' if ssl else '') port = f':{port}' if port",
"if 'depth' in args: params['group_level'] = args['depth'] params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\",",
"self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return",
"fp) fp.close() yield open(fp.name, 'rb') finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self,",
"auth=self.auth) if response.status_code == 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code !=",
"\"\"\"Main module.\"\"\" import logging import fnmatch import io import mimetypes import os import",
"logger('creating the db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db {_id}')",
"_ = mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src,",
"import requests logger = logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception): def __init__(self, *args,",
"self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in",
"environment varialble {key}') scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri) if userid",
"\"\"\" Uploads a file using dst as the doc/bucket id :param src: path",
"url): return requests.get(url, stream=True, auth=self.auth).content def upload(self, src, dst, dry_run=False): src = os.path.abspath(src)",
"= file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst, pp) if dry_run: yield file_path, dest_path,",
"auth=self.auth).content def upload(self, src, dst, dry_run=False): src = os.path.abspath(src) if os.path.isfile(src): if dry_run:",
"os.path.isfile(src): if dry_run: yield src, dst, 'DRY RUN', '' else: with open(src, 'rb')",
"in dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}'",
"in_memory=False): try: if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp",
"f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls, logger=echo): echo('connecting to couchdb') client = cls()",
"return file_name, f'{file_uri}', response.status_code, response.reason rev = response.json()['rev'] else: rev = response.headers['ETag'] major,",
"\"function (doc) {\\n if (doc._attachments) {\\n for (const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'),",
"psswd, host, port, db else: raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def list_attachments(self,",
"psswd, host, port, db = self.parse_connection_uri(uri) if userid and psswd: self.auth = (userid,",
"file_path, file_size def run_view(self, **args): params = {'reduce': False, 'include_docs': False} if 'depth'",
"headers=headers, auth=self.auth) elif response.status_code == 404: response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def",
"COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\": \"function (doc) {\\n if (doc._attachments)",
"file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP',",
"requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code",
"import tempfile from contextlib import contextmanager import requests logger = logging.getLogger(__file__) echo =",
"couchdb attachments \"\"\" \"\"\"Main module.\"\"\" import logging import fnmatch import io import mimetypes",
"self.auth = None self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response =",
"'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return {host, db, auth, passwd}: \"\"\" if match",
"psswd: self.auth = (userid, psswd) else: self.auth = None self.db = db self.db_uri",
"scheme = 'http' + ('s' if ssl else '') port = f':{port}' if",
"URLRequired(f'You can set environment varialble {key}') scheme, userid, psswd, host, port, db =",
"\"\"\"A valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class",
"regexs or any([regex.search(file_path) for regex in regexs]): yield file_path, file_size def run_view(self, **args):",
"404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201: return file_name, f'{file_uri}',",
"dst, dry_run=False): src = os.path.abspath(src) if os.path.isfile(src): if dry_run: yield src, dst, 'DRY",
"port, db) = match.groups() scheme = 'http' + ('s' if ssl else '')",
"yield src, dst, 'DRY RUN', response else: uri = f'{self.db_uri}/{src}' response = requests.get(uri,",
"[] for pattern in patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex =",
"response = requests.get(uri, auth=self.auth) yield uri, dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def",
"os.path.abspath(src) if os.path.isfile(src): if dry_run: yield src, dst, 'DRY RUN', '' else: with",
"dst, 'DRY RUN', response else: uri = f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield",
"= tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb') finally: if in_memory: bytes_fp.close() else:",
"def run_view(self, **args): params = {'reduce': False, 'include_docs': False} if 'depth' in args:",
"dest_path = os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):]) #",
"for segment in dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri",
"response.reason @classmethod def init_db(cls, logger=echo): echo('connecting to couchdb') client = cls() logger('checking the",
"valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class CouchDBClient:",
"{'Content-type': f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name,",
"raise URLRequired(f'You can set environment varialble {key}') scheme, userid, psswd, host, port, db",
"if in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content def",
"= io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close()",
"\"\"\" \"\"\"Main module.\"\"\" import logging import fnmatch import io import mimetypes import os",
"download(self, src, dst, dry_run=False): for src, dst in self.download_srcdst(src, dst): if dry_run: yield",
"def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL is",
"= 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri",
"port, db else: raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs",
"if os.path.isfile(src): if dry_run: yield src, dst, 'DRY RUN', '' else: with open(src,",
"('s' if ssl else '') port = f':{port}' if port else '' return",
"= os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self, src, dst): \"\"\" Uploads a",
"file_name, f'{file_uri}', response.status_code, response.reason rev = response.json()['rev'] else: rev = response.headers['ETag'] major, _",
"to file to upload :param dst: id :return: file_name, file_url, upload status, upload",
"if dry_run: yield src, dst, 'DRY RUN', '' else: with open(src, 'rb') as",
"= file_path[1:] dest_path = os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path",
"segment in dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri =",
"is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY =",
"= response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif",
"\"\"\"A valid URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)'",
"{'reduce': False, 'include_docs': False} if 'depth' in args: params['group_level'] = args['depth'] params['reduce'] =",
"200 def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id =",
"else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb') finally: if in_memory:",
"\"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\": \"function (doc) {\\n if (doc._attachments) {\\n",
"url, in_memory=False): try: if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else:",
"is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE)",
"def save_doc(self, doc): _id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth)",
"yield file_path, dest_path, 'DRY RUN', '' else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes,",
"io import mimetypes import os import pathlib import re import tempfile from contextlib",
"db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code ==",
"re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files = False for file_path, _ in self.ls(): if",
"\"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\": \"function (doc) {\\n if (doc._attachments) {\\n for",
"dry_run=False): for src, dst in self.download_srcdst(src, dst): if dry_run: yield src, dst, 'DRY",
"= f':{port}' if port else '' return scheme, userid, psswd, host, port, db",
"f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id),",
"response.status_code, response.reason @classmethod def init_db(cls, logger=echo): echo('connecting to couchdb') client = cls() logger('checking",
"logger(f'creating or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\":",
"break yield file_path, dest_path def download_file(self, url, dest): with open(dest, 'wb') as f:",
"file_path[1:] dest_path = os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path =",
"to couchdb') client = cls() logger('checking the db') if not client.check_db(): logger('creating the",
"os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath, dirs, files) in os.walk(src): for filename in",
"not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):]) # break yield file_path, dest_path def",
":param src: path to file to upload :param dst: id :return: file_name, file_url,",
"DUMP', is_copying_files, dst, file_path[len(src):]) # break yield file_path, dest_path def download_file(self, url, dest):",
"import pathlib import re import tempfile from contextlib import contextmanager import requests logger",
"else: uri = f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield uri, dst, response.status_code, response.reason",
"src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst)",
"f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404: response",
"response.status_code == 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201: return",
"in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self, src, dst, dry_run=False): for src, dst",
"fnmatch import io import mimetypes import os import pathlib import re import tempfile",
"dst_file_path) else: dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst,",
"dest_path = os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:]",
"userid and psswd: self.auth = (userid, psswd) else: self.auth = None self.db =",
"file_path, file_size in self.run_view(): if not regexs or any([regex.search(file_path) for regex in regexs]):",
"'' else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp:",
"not regexs or any([regex.search(file_path) for regex in regexs]): yield file_path, file_size def run_view(self,",
"response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200 def create_db(self): response = requests.put(f\"{self.db_uri}\",",
"client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={",
"= [] for pattern in patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex",
"response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False): if match :=",
"filename in files: file_path = os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) + 1:] dest_path",
"def __init__(self, uri=None): if uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri:",
"dst: id :return: file_name, file_url, upload status, upload message \"\"\" doc_id = [segment",
"requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def download(self,",
"self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200",
"self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb') finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name) def",
"elif os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath, dirs, files) in os.walk(src): for filename",
"URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY",
"src: path to file to upload :param dst: id :return: file_name, file_url, upload",
"host, port, db else: raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns):",
"is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You",
"status, upload message \"\"\" doc_id = [segment for segment in dst.split('/') if segment][0]",
"@classmethod def init_db(cls, logger=echo): echo('connecting to couchdb') client = cls() logger('checking the db')",
"path to file to upload :param dst: id :return: file_name, file_url, upload status,",
"auth=self.auth) if response.status_code == 200: rev = response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response",
"dest): with open(dest, 'wb') as f: return self.download_to_file(url, f) def download_to_file(self, url, file_obj):",
"sub_regex = re.compile(src) is_copying_files = False for file_path, _ in self.ls(): if regex.search(file_path):",
"in self.ls(): if regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if",
"r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False):",
"is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\": \"function (doc)",
"\"map\": \"function (doc) {\\n if (doc._attachments) {\\n for (const file_name in doc._attachments) {\\n",
"response.status_code == 404: response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\"",
"!= 201: return file_name, f'{file_uri}', response.status_code, response.reason rev = response.json()['rev'] else: rev =",
"dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):])",
"else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content def upload(self, src, dst,",
"fp.close() yield open(fp.name, 'rb') finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url):",
"echo('connecting to couchdb') client = cls() logger('checking the db') if not client.check_db(): logger('creating",
"'DRY RUN', '' else: with open(src, 'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src)))",
"if not uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment varialble {key}')",
"match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files = True else: regex = re.compile(fnmatch.translate(src)[:-2])",
"= requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/",
"self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment varialble {key}') scheme, userid, psswd, host, port,",
"False} if 'depth' in args: params['group_level'] = args['depth'] params['reduce'] = True response =",
"*patterns): regexs = [] for pattern in patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern))",
"in patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for",
"requests.get(url, stream=True, auth=self.auth).content def upload(self, src, dst, dry_run=False): src = os.path.abspath(src) if os.path.isfile(src):",
"def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200 def create_db(self): response",
"file_size in self.run_view(): if not regexs or any([regex.search(file_path) for regex in regexs]): yield",
"userid, psswd, host, port, db else: raise BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def",
"pathlib.Path(src).resolve() for (dirpath, dirs, files) in os.walk(src): for filename in files: file_path =",
"uri: :return {host, db, auth, passwd}: \"\"\" if match := self.URI_RE.match(uri): (ssl, _,",
"requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk)",
"'/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code",
"= [segment for segment in dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri =",
"requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls,",
"\"\"\" doc_id = [segment for segment in dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:])",
"init_db(cls, logger=echo): echo('connecting to couchdb') client = cls() logger('checking the db') if not",
"if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path = os.path.join(dst, dst_file_path) else: dst_file_path = file_path[len(src):]",
"upload :param dst: id :return: file_name, file_url, upload status, upload message \"\"\" doc_id",
"client = cls() logger('checking the db') if not client.check_db(): logger('creating the db') client.create_db()",
"db') if not client.check_db(): logger('creating the db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or",
"ssl else '') port = f':{port}' if port else '' return scheme, userid,",
"for pattern in patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2])",
"rev = response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]}",
"to upload :param dst: id :return: file_name, file_url, upload status, upload message \"\"\"",
"regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in self.run_view(): if not regexs or",
"else: with open(src, 'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p",
"port = f':{port}' if port else '' return scheme, userid, psswd, host, port,",
"as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath,",
"file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst, pp) if dry_run: yield file_path, dest_path, 'DRY",
"URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE =",
"file_path, dest_path def download_file(self, url, dest): with open(dest, 'wb') as f: return self.download_to_file(url,",
"yield uri, dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False):",
"response = requests.head(doc_uri, auth=self.auth) if response.status_code == 200: rev = response.headers['ETag'] headers =",
"_id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now",
"r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False): try: if in_memory: bytes_fp",
"= self.parse_connection_uri(uri) if userid and psswd: self.auth = (userid, psswd) else: self.auth =",
"import logging import fnmatch import io import mimetypes import os import pathlib import",
"parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return {host, db, auth,",
"= '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if",
"def download_srcdst(self, src, dst, dry_run=False): if match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files",
"= logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args,",
"if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO",
"dry_run=False): if match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files = True else: regex",
"filename) pp = file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst, pp) if dry_run: yield",
"yield open(fp.name, 'rb') finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return",
"yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve() for (dirpath, dirs, files)",
"= {'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code == 404:",
"uri=None): if uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri: key =",
"requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200 def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status()",
"psswd, host, _, port, db) = match.groups() scheme = 'http' + ('s' if",
"is_copying_files = True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files = False",
"= requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code == 404: response = requests.post(self.db_uri, json=doc,",
"auth, passwd}: \"\"\" if match := self.URI_RE.match(uri): (ssl, _, userid, psswd, host, _,",
"= True else: regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files = False for",
"bytes_fp) yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb')",
"open(src, 'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve()",
"now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\": \"function (doc) {\\n",
"response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response =",
"= 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri is None: uri",
"def parse_connection_uri(self, uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return {host, db,",
"if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False)",
"file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth)",
"= os.environ.get(self.URI_ENVIRON_KEY) if not uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You can set environment",
"doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\", \"reduce\": \"_stats\" } }, \"language\": \"javascript\" }",
"auth=self.auth) elif response.status_code == 404: response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self,",
"\"\"\" A client api for couchdb attachments \"\"\" \"\"\"Main module.\"\"\" import logging import",
"(userid, psswd) else: self.auth = None self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def",
"URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY)",
"yield file_path, file_size def run_view(self, **args): params = {'reduce': False, 'include_docs': False} if",
"src, dst, 'DRY RUN', '' else: with open(src, 'rb') as src_fp: yield self.upload_file(src_fp,",
"regexs = [] for pattern in patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else:",
"if response.status_code != 201: return file_name, f'{file_uri}', response.status_code, response.reason rev = response.json()['rev'] else:",
"*args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class",
"**args): params = {'reduce': False, 'include_docs': False} if 'depth' in args: params['group_level'] =",
"a connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs = [] for pattern in",
"re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False): if match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src))",
"updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\":",
"URI_ENVIRON_KEY = 'COUCHDB_URI' CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None): if",
"the db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS)",
"file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\", \"reduce\": \"_stats\"",
"params['group_level'] = args['depth'] params['reduce'] = True response = requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for",
"os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self, src, dst): \"\"\" Uploads a file",
"with open(src, 'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p =",
"db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db",
"module.\"\"\" import logging import fnmatch import io import mimetypes import os import pathlib",
"'DRY RUN', '' else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile()",
"CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL",
"if match := self.URI_RE.match(uri): (ssl, _, userid, psswd, host, _, port, db) =",
"varialble {key}') scheme, userid, psswd, host, port, db = self.parse_connection_uri(uri) if userid and",
"client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is",
"else: self.auth = None self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response",
"{ \"attachment_list\": { \"map\": \"function (doc) {\\n if (doc._attachments) {\\n for (const file_name",
"id :return: file_name, file_url, upload status, upload message \"\"\" doc_id = [segment for",
"file_name, file_url, upload status, upload message \"\"\" doc_id = [segment for segment in",
"@contextmanager def get_attachment(self, url, in_memory=False): try: if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp)",
"else '' return scheme, userid, psswd, host, port, db else: raise BadConnectionURI(f'use a",
"(doc._attachments) {\\n for (const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n",
"uri): \"\"\" Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return {host, db, auth, passwd}:",
"if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size",
"uri = f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield uri, dst, response.status_code, response.reason WILDCARD_RE",
"import mimetypes import os import pathlib import re import tempfile from contextlib import",
"A client api for couchdb attachments \"\"\" \"\"\"Main module.\"\"\" import logging import fnmatch",
"dst.split('/') if segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response",
"file_name, f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls, logger=echo): echo('connecting to couchdb') client =",
"dst, dry_run=False): if match := self.WILDCARD_RE.search(src): regex = re.compile(fnmatch.translate(src)) is_copying_files = True else:",
"def download_to_file(self, url, file_obj): with requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status() for chunk",
"'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri is None: uri =",
"if port else '' return scheme, userid, psswd, host, port, db else: raise",
"'DRY RUN', response else: uri = f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth) yield uri,",
"src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self, src, dst): \"\"\" Uploads a file using",
"or updating the db {_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\",",
"dirs, files) in os.walk(src): for filename in files: file_path = os.path.join(dirpath, filename) pp",
"uri, dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False): if",
"pp = file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst, pp) if dry_run: yield file_path,",
"run_view(self, **args): params = {'reduce': False, 'include_docs': False} if 'depth' in args: params['group_level']",
"response.status_code != 201: return file_name, f'{file_uri}', response.status_code, response.reason rev = response.json()['rev'] else: rev",
"{_id}') client.save_doc(client.COUCHFS_VIEWS) logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": {",
"tempfile from contextlib import contextmanager import requests logger = logging.getLogger(__file__) echo = logger.info",
"'' else: with open(src, 'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src):",
"logger(f'db is now setup') COUCHFS_VIEWS={ \"_id\": \"_design/couchfs_views\", \"views\": { \"attachment_list\": { \"map\": \"function",
"data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls, logger=echo):",
"pattern in patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex)",
"requests.head(doc_uri, auth=self.auth) if response.status_code == 200: rev = response.headers['ETag'] headers = {'If-Match': rev[1:-1]}",
"download_to_file(self, url, file_obj): with requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status() for chunk in",
"response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason @classmethod",
"= None self.db = db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\",",
"return requests.get(url, stream=True, auth=self.auth).content def upload(self, src, dst, dry_run=False): src = os.path.abspath(src) if",
"if response.status_code == 404: response = requests.post(f'{self.db_uri}', json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201:",
"stream=True, auth=self.auth) as r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager",
"dst, response.status_code, response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False): if match",
"or any([regex.search(file_path) for regex in regexs]): yield file_path, file_size def run_view(self, **args): params",
"response.status_code == 200: rev = response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response = requests.put(doc_uri,",
"get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content def upload(self, src, dst, dry_run=False): src =",
"file_path, dest_path, 'DRY RUN', '' else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst):",
"params = {'reduce': False, 'include_docs': False} if 'depth' in args: params['group_level'] = args['depth']",
"attachments \"\"\" \"\"\"Main module.\"\"\" import logging import fnmatch import io import mimetypes import",
"(ssl, _, userid, psswd, host, _, port, db) = match.groups() scheme = 'http'",
"'' return scheme, userid, psswd, host, port, db else: raise BadConnectionURI(f'use a connections",
":param dst: id :return: file_name, file_url, upload status, upload message \"\"\" doc_id =",
"for (const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else {\\n emit(doc._id.split('/'), 0)\\n}\\n}\",",
"segment][0] file_name = '/'.join(dst.split('/')[1:]) doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}',",
"dest_path def download_file(self, url, dest): with open(dest, 'wb') as f: return self.download_to_file(url, f)",
"Given: 'couchdb://admin:*****%@127.0.0.1:5984/test' -> http://127.0.0.1:5984/ :param uri: :return {host, db, auth, passwd}: \"\"\" if",
"= os.path.abspath(src) if os.path.isfile(src): if dry_run: yield src, dst, 'DRY RUN', '' else:",
"regexs]): yield file_path, file_size def run_view(self, **args): params = {'reduce': False, 'include_docs': False}",
"host, _, port, db) = match.groups() scheme = 'http' + ('s' if ssl",
"os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):]) # break yield",
"upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp,",
"create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id = doc['_id'] doc_uri",
"file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path = os.path.join(dst, dst_file_path) else: dst_file_path =",
"import io import mimetypes import os import pathlib import re import tempfile from",
"BadConnectionURI(f'use a connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs = [] for pattern",
"return self.download_to_file(url, f) def download_to_file(self, url, file_obj): with requests.get(url, stream=True, auth=self.auth) as r:",
"contextlib import contextmanager import requests logger = logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception):",
"print('NO DUMP', is_copying_files, dst, file_path[len(src):]) # break yield file_path, dest_path def download_file(self, url,",
"= requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}'",
"self.auth = (userid, psswd) else: self.auth = None self.db = db self.db_uri =",
"{self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs = [] for pattern in patterns: if self.WILDCARD_RE.search(pattern):",
"regex = re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in self.run_view():",
"= re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path, file_size in self.run_view(): if",
"'include_docs': False} if 'depth' in args: params['group_level'] = args['depth'] params['reduce'] = True response",
"+ 1:] dest_path = os.path.join(dst, pp) if dry_run: yield file_path, dest_path, 'DRY RUN',",
"= os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):]) # break",
"regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path =",
"= pathlib.Path(src).resolve() for (dirpath, dirs, files) in os.walk(src): for filename in files: file_path",
"response = requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id = doc['_id'] doc_uri =",
"pp) if dry_run: yield file_path, dest_path, 'DRY RUN', '' else: yield self.upload_file(file_path, dest_path)",
"as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self, src, dst):",
"if not regexs or any([regex.search(file_path) for regex in regexs]): yield file_path, file_size def",
"dry_run=False): src = os.path.abspath(src) if os.path.isfile(src): if dry_run: yield src, dst, 'DRY RUN',",
"requests.put(f\"{self.db_uri}\", auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response",
"auth=self.auth) response.raise_for_status() def save_doc(self, doc): _id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response =",
"src, dst, dry_run=False): for src, dst in self.download_srcdst(src, dst): if dry_run: yield src,",
"src = os.path.abspath(src) if os.path.isfile(src): if dry_run: yield src, dst, 'DRY RUN', ''",
"contextmanager import requests logger = logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception): def __init__(self,",
"src, dst, 'DRY RUN', response else: uri = f'{self.db_uri}/{src}' response = requests.get(uri, auth=self.auth)",
"= mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers,",
"upload message \"\"\" doc_id = [segment for segment in dst.split('/') if segment][0] file_name",
"= f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if response.status_code == 200: rev = response.headers['ETag']",
"None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You can",
"self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name,",
"else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self, src_bytes, dst): with tempfile.NamedTemporaryFile() as src_fp: src_fp.name",
":= self.URI_RE.match(uri): (ssl, _, userid, psswd, host, _, port, db) = match.groups() scheme",
"regexs.append(regex) for file_path, file_size in self.run_view(): if not regexs or any([regex.search(file_path) for regex",
"logger=echo): echo('connecting to couchdb') client = cls() logger('checking the db') if not client.check_db():",
"uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri: key = self.URI_ENVIRON_KEY raise URLRequired(f'You can set",
"io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield",
"{host, db, auth, passwd}: \"\"\" if match := self.URI_RE.match(uri): (ssl, _, userid, psswd,",
"if (doc._attachments) {\\n for (const file_name in doc._attachments) {\\n emit((doc._id+'/'+file_name).split('/'), doc._attachments[file_name].length);\\n }\\n} else",
"for chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False): try:",
"is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path",
"r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self, url,",
"as f: return self.download_to_file(url, f) def download_to_file(self, url, file_obj): with requests.get(url, stream=True, auth=self.auth)",
"in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url,",
"'rb') as src_fp: yield self.upload_file(src_fp, os.path.join(dst, os.path.basename(src))) elif os.path.isdir(src): p = pathlib.Path(src).resolve() for",
"= requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code == 200 def create_db(self): response = requests.put(f\"{self.db_uri}\", auth=self.auth)",
"logger = logging.getLogger(__file__) echo = logger.info class CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException,",
"patterns: if self.WILDCARD_RE.search(pattern): regex = re.compile(fnmatch.translate(pattern)) else: regex = re.compile(fnmatch.translate(pattern)[:-2]) regexs.append(regex) for file_path,",
"= requests.head(doc_uri, auth=self.auth) if response.status_code == 200: rev = response.headers['ETag'] headers = {'If-Match':",
"for regex in regexs]): yield file_path, file_size def run_view(self, **args): params = {'reduce':",
"= requests.get(f\"{self.db_uri}/_design/couchfs_views/_view/attachment_list\", params=params, auth=self.auth) response.raise_for_status() for doc in response.json()['rows']: yield '/'.join(doc['key']), doc['value'] def",
"import re import tempfile from contextlib import contextmanager import requests logger = logging.getLogger(__file__)",
"dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst, file_path[len(src):]) # break yield file_path,",
"as r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192): if chunk: file_obj.write(chunk) @contextmanager def get_attachment(self,",
"os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) + 1:] dest_path = os.path.join(dst, pp) if dry_run:",
"userid, psswd, host, _, port, db) = match.groups() scheme = 'http' + ('s'",
"for file_path, _ in self.ls(): if regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path",
"file_obj.write(chunk) @contextmanager def get_attachment(self, url, in_memory=False): try: if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url,",
"regex = re.compile(fnmatch.translate(src)[:-2]) sub_regex = re.compile(src) is_copying_files = False for file_path, _ in",
"yield src, dst, 'DRY RUN', '' else: with open(src, 'rb') as src_fp: yield",
"f: return self.download_to_file(url, f) def download_to_file(self, url, file_obj): with requests.get(url, stream=True, auth=self.auth) as",
"False for file_path, _ in self.ls(): if regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src)",
"file_path, _ in self.ls(): if regex.search(file_path): if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path =",
"CONNECTION_RE = 'couchdb(s)?://((\\w+)\\:(.+)@)?([\\w\\.]+)(:(\\d+))?/(\\w+)' URI_RE = re.compile(CONNECTION_RE) def __init__(self, uri=None): if uri is None:",
"open(fp.name, 'rb') finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url,",
"the db') if not client.check_db(): logger('creating the db') client.create_db() _id = client.COUCHFS_VIEWS['_id'] logger(f'creating",
"required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class CouchDBClient: URI_ENVIRON_KEY = 'COUCHDB_URI'",
"uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri: key = self.URI_ENVIRON_KEY raise",
"match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path =",
"yield bytes_fp.getvalue() else: fp = tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb') finally:",
"major, _ = mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}',",
"connections like {self.CONNECTION_RE}') def list_attachments(self, *patterns): regexs = [] for pattern in patterns:",
"save_doc(self, doc): _id = doc['_id'] doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if",
"in self.download_srcdst(src, dst): if dry_run: yield src, dst, 'DRY RUN', response else: uri",
"\"attachment_list\": { \"map\": \"function (doc) {\\n if (doc._attachments) {\\n for (const file_name in",
"rev = response.headers['ETag'] headers = {'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth)",
"port else '' return scheme, userid, psswd, host, port, db else: raise BadConnectionURI(f'use",
"= f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404:",
":return {host, db, auth, passwd}: \"\"\" if match := self.URI_RE.match(uri): (ssl, _, userid,",
"doc_uri = f'{self.db_uri}/{_id}' response = requests.head(doc_uri, auth=self.auth) if response.status_code == 200: rev =",
"self.upload_file(src_fp, dst) def upload_file(self, src, dst): \"\"\" Uploads a file using dst as",
"url, file_obj): with requests.get(url, stream=True, auth=self.auth) as r: r.raise_for_status() for chunk in r.iter_content(chunk_size=8192):",
"dry_run: yield file_path, dest_path, 'DRY RUN', '' else: yield self.upload_file(file_path, dest_path) def upload_bytes_file(self,",
"with tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self,",
"file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code == 404: response =",
"upload(self, src, dst, dry_run=False): src = os.path.abspath(src) if os.path.isfile(src): if dry_run: yield src,",
"mimetypes import os import pathlib import re import tempfile from contextlib import contextmanager",
"is_copying_files = False for file_path, _ in self.ls(): if regex.search(file_path): if is_copying_files: match",
"mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth)",
"if is_copying_files: match = self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:]",
"tempfile.NamedTemporaryFile() as src_fp: src_fp.name = os.path.basename(dst) src_fp.write(src_bytes) return self.upload_file(src_fp, dst) def upload_file(self, src,",
"if uri is None: uri = os.environ.get(self.URI_ENVIRON_KEY) if not uri: key = self.URI_ENVIRON_KEY",
"= {'Content-type': f'{major}', 'If-Match': rev[1:-1]} response = requests.put(f'{file_uri}', data=src, headers=headers, auth=self.auth) response.raise_for_status() return",
"self.WILDCARD_RE.search(src) dst_file_path = file_path[match.span()[0]:] if dst_file_path.startswith('/'): dst_file_path = file_path[1:] dest_path = os.path.join(dst, dst_file_path)",
"response.status_code, response.reason rev = response.json()['rev'] else: rev = response.headers['ETag'] major, _ = mimetypes.guess_type(src.name)",
"else: rev = response.headers['ETag'] major, _ = mimetypes.guess_type(src.name) headers = {'Content-type': f'{major}', 'If-Match':",
"{ \"map\": \"function (doc) {\\n if (doc._attachments) {\\n for (const file_name in doc._attachments)",
"response.reason WILDCARD_RE = re.compile('[\\*\\?\\[\\]]+') def download_srcdst(self, src, dst, dry_run=False): if match := self.WILDCARD_RE.search(src):",
"p = pathlib.Path(src).resolve() for (dirpath, dirs, files) in os.walk(src): for filename in files:",
"try: if in_memory: bytes_fp = io.BytesIO() self.download_to_file(url, bytes_fp) yield bytes_fp.getvalue() else: fp =",
"with open(dest, 'wb') as f: return self.download_to_file(url, f) def download_to_file(self, url, file_obj): with",
"if dry_run: yield file_path, dest_path, 'DRY RUN', '' else: yield self.upload_file(file_path, dest_path) def",
"doc_uri = f'{self.db_uri}/{doc_id}' file_uri = f'{doc_uri}/{file_name}' response = requests.head(f'{doc_uri}', auth=self.auth) if response.status_code ==",
"{'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code == 404: response",
"src, dst in self.download_srcdst(src, dst): if dry_run: yield src, dst, 'DRY RUN', response",
"super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A valid URL is required.\"\"\" class BadConnectionURI(CouchDBClientException): \"\"\"A",
"tempfile.NamedTemporaryFile(delete=False) self.download_to_file(url, fp) fp.close() yield open(fp.name, 'rb') finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name)",
"from contextlib import contextmanager import requests logger = logging.getLogger(__file__) echo = logger.info class",
"'') port = f':{port}' if port else '' return scheme, userid, psswd, host,",
"file_size def run_view(self, **args): params = {'reduce': False, 'include_docs': False} if 'depth' in",
"pathlib import re import tempfile from contextlib import contextmanager import requests logger =",
"= dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'): print('NO DUMP', is_copying_files, dst,",
"finally: if in_memory: bytes_fp.close() else: os.unlink(fp.name) def get_attachment_as_bytes(self, url): return requests.get(url, stream=True, auth=self.auth).content",
"file_path[len(src):] if file_path.startswith('/'): dst_file_path = dst_file_path[1:] dest_path = os.path.join(dst, dst_file_path[1:]) if not dest_path.startswith('dump'):",
"headers = {'If-Match': rev[1:-1]} response = requests.put(doc_uri, json=doc, headers=headers, auth=self.auth) elif response.status_code ==",
"= re.compile(src) is_copying_files = False for file_path, _ in self.ls(): if regex.search(file_path): if",
"elif response.status_code == 404: response = requests.post(self.db_uri, json=doc, auth=self.auth) response.raise_for_status() def parse_connection_uri(self, uri):",
"\"views\": { \"attachment_list\": { \"map\": \"function (doc) {\\n if (doc._attachments) {\\n for (const",
"os.walk(src): for filename in files: file_path = os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) +",
"json=dict(_id=doc_id), auth=self.auth) if response.status_code != 201: return file_name, f'{file_uri}', response.status_code, response.reason rev =",
"for couchdb attachments \"\"\" \"\"\"Main module.\"\"\" import logging import fnmatch import io import",
"= db self.db_uri = f'{scheme}://{host}{port}/{self.db}' def check_db(self): response = requests.head(f\"{self.db_uri}\", auth=self.auth) return response.status_code",
"response.raise_for_status() return file_name, f'{file_uri}', response.status_code, response.reason @classmethod def init_db(cls, logger=echo): echo('connecting to couchdb')",
"logger.info class CouchDBClientException(Exception): def __init__(self, *args, **kwargs): super(CouchDBClientException, self).__init__(*args, **kwargs) class URLRequired(CouchDBClientException): \"\"\"A",
"and psswd: self.auth = (userid, psswd) else: self.auth = None self.db = db",
"_, port, db) = match.groups() scheme = 'http' + ('s' if ssl else",
"for filename in files: file_path = os.path.join(dirpath, filename) pp = file_path[len(p.parent.as_posix()) + 1:]",
"self.parse_connection_uri(uri) if userid and psswd: self.auth = (userid, psswd) else: self.auth = None",
"file to upload :param dst: id :return: file_name, file_url, upload status, upload message"
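upload(), download(), and list_attachments() are generators that yield one status tuple per file, so callers must iterate them for any work to happen. A usage sketch under the same assumptions: the local path and document id below are hypothetical, and list_attachments() presumes init_db() has already installed the couchfs_views design document:

client = CouchDBClient('couchdb://admin:secret@127.0.0.1:5984/test')

# Upload one file under the 'docs' document; each iteration yields the
# (file_name, file_url, status, reason) tuple returned by upload_file().
for name, url, status, reason in client.upload('./report.pdf', 'docs'):
    print(name, url, status, reason)

# Glob-style listing backed by the attachment_list view.
for file_path, size in client.list_attachments('docs/*'):
    print(file_path, size)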
"* solve(3, 1) * solve(5, 1) * solve(7, 1) * solve(1, 2) print(f'solution",
"self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0 while",
"step_y) tree_count += 1 if map_.pos_has_tree() else 0 except ValueError: return tree_count if",
"typing import List INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input() -> List[str]: with open(INPUT_FILE)",
"trees: List[List[bool]]) -> None: self.trees = trees self.height = len(self.trees) self.width = len(self.trees[0])",
"'#' for char in line] for line in lines ] return cls(trees) def",
"def solve(step_x, step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True:",
"raise ValueError('stepped out') self.pos = new_pos def pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x",
"Position(0, 0) def step(self, x, y) -> None: new_pos = Position(self.pos.x + x,",
"self.tile = tile self.pos = Position(0, 0) def step(self, x, y) -> None:",
"int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True: try: map_.step(step_x, step_y) tree_count",
"self.height = len(self.trees) self.width = len(self.trees[0]) Position = namedtuple('Position', ['x', 'y']) class Map:",
"char in line] for line in lines ] return cls(trees) def __init__(self, trees:",
"from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees = [ [char == '#' for char",
"lines ] return cls(trees) def __init__(self, trees: List[List[bool]]) -> None: self.trees = trees",
"0) def step(self, x, y) -> None: new_pos = Position(self.pos.x + x, self.pos.y",
"= namedtuple('Position', ['x', 'y']) class Map: def __init__(self, tile: Tile) -> None: self.tile",
"map_.step(step_x, step_y) tree_count += 1 if map_.pos_has_tree() else 0 except ValueError: return tree_count",
"INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input() -> List[str]: with open(INPUT_FILE) as fh: return",
"self.tile.height: raise ValueError('stepped out') self.pos = new_pos def pos_has_tree(self) -> bool: tile_pos =",
"in lines ] return cls(trees) def __init__(self, trees: List[List[bool]]) -> None: self.trees =",
"else 0 except ValueError: return tree_count if __name__ == '__main__': solution_part1 = solve(3,",
"for line in fh.readlines()] class Tile: @classmethod def from_raw_lines(cls, lines: List[str]) -> \"Tile\":",
"-> None: self.trees = trees self.height = len(self.trees) self.width = len(self.trees[0]) Position =",
"ValueError('stepped out') self.pos = new_pos def pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x %",
"def __init__(self, tile: Tile) -> None: self.tile = tile self.pos = Position(0, 0)",
"= trees self.height = len(self.trees) self.width = len(self.trees[0]) Position = namedtuple('Position', ['x', 'y'])",
"= Position(0, 0) def step(self, x, y) -> None: new_pos = Position(self.pos.x +",
"[ [char == '#' for char in line] for line in lines ]",
"def pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def",
"y) -> None: new_pos = Position(self.pos.x + x, self.pos.y + y) if new_pos.y",
"self.pos = new_pos def pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x % self.tile.width, self.pos.y)",
"solve(step_x, step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True: try:",
"== '__main__': solution_part1 = solve(3, 1) print(f'solution part 1: {solution_part1}') solution_part2 = solve(1,",
"self.width = len(self.trees[0]) Position = namedtuple('Position', ['x', 'y']) class Map: def __init__(self, tile:",
"self.trees = trees self.height = len(self.trees) self.width = len(self.trees[0]) Position = namedtuple('Position', ['x',",
"['x', 'y']) class Map: def __init__(self, tile: Tile) -> None: self.tile = tile",
"map_.pos_has_tree() else 0 except ValueError: return tree_count if __name__ == '__main__': solution_part1 =",
"out') self.pos = new_pos def pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x % self.tile.width,",
"__init__(self, tile: Tile) -> None: self.tile = tile self.pos = Position(0, 0) def",
"Position(self.pos.x + x, self.pos.y + y) if new_pos.y >= self.tile.height: raise ValueError('stepped out')",
"while True: try: map_.step(step_x, step_y) tree_count += 1 if map_.pos_has_tree() else 0 except",
"fh.readlines()] class Tile: @classmethod def from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees = [",
"x, y) -> None: new_pos = Position(self.pos.x + x, self.pos.y + y) if",
"import namedtuple from os import path from typing import List INPUT_FILE = path.join(path.dirname(__file__),",
"import List INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input() -> List[str]: with open(INPUT_FILE) as",
"= path.join(path.dirname(__file__), 'input') def get_input() -> List[str]: with open(INPUT_FILE) as fh: return [line.rstrip('\\n')",
"-> List[str]: with open(INPUT_FILE) as fh: return [line.rstrip('\\n') for line in fh.readlines()] class",
"print(f'solution part 1: {solution_part1}') solution_part2 = solve(1, 1) * solve(3, 1) * solve(5,",
"for char in line] for line in lines ] return cls(trees) def __init__(self,",
"\"Tile\": trees = [ [char == '#' for char in line] for line",
"namedtuple from os import path from typing import List INPUT_FILE = path.join(path.dirname(__file__), 'input')",
"+ x, self.pos.y + y) if new_pos.y >= self.tile.height: raise ValueError('stepped out') self.pos",
"-> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True: try: map_.step(step_x, step_y)",
"__init__(self, trees: List[List[bool]]) -> None: self.trees = trees self.height = len(self.trees) self.width =",
"True: try: map_.step(step_x, step_y) tree_count += 1 if map_.pos_has_tree() else 0 except ValueError:",
"0 except ValueError: return tree_count if __name__ == '__main__': solution_part1 = solve(3, 1)",
"solve(1, 1) * solve(3, 1) * solve(5, 1) * solve(7, 1) * solve(1,",
"open(INPUT_FILE) as fh: return [line.rstrip('\\n') for line in fh.readlines()] class Tile: @classmethod def",
"ValueError: return tree_count if __name__ == '__main__': solution_part1 = solve(3, 1) print(f'solution part",
"'__main__': solution_part1 = solve(3, 1) print(f'solution part 1: {solution_part1}') solution_part2 = solve(1, 1)",
"0 while True: try: map_.step(step_x, step_y) tree_count += 1 if map_.pos_has_tree() else 0",
"new_pos = Position(self.pos.x + x, self.pos.y + y) if new_pos.y >= self.tile.height: raise",
"os import path from typing import List INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input()",
">= self.tile.height: raise ValueError('stepped out') self.pos = new_pos def pos_has_tree(self) -> bool: tile_pos",
"trees = [ [char == '#' for char in line] for line in",
"namedtuple('Position', ['x', 'y']) class Map: def __init__(self, tile: Tile) -> None: self.tile =",
"from collections import namedtuple from os import path from typing import List INPUT_FILE",
"tree_count += 1 if map_.pos_has_tree() else 0 except ValueError: return tree_count if __name__",
"<reponame>jkowalleck/AoC2020<filename>03/solve.py from collections import namedtuple from os import path from typing import List",
"as fh: return [line.rstrip('\\n') for line in fh.readlines()] class Tile: @classmethod def from_raw_lines(cls,",
"+= 1 if map_.pos_has_tree() else 0 except ValueError: return tree_count if __name__ ==",
"1) print(f'solution part 1: {solution_part1}') solution_part2 = solve(1, 1) * solve(3, 1) *",
"__name__ == '__main__': solution_part1 = solve(3, 1) print(f'solution part 1: {solution_part1}') solution_part2 =",
"new_pos.y >= self.tile.height: raise ValueError('stepped out') self.pos = new_pos def pos_has_tree(self) -> bool:",
"def step(self, x, y) -> None: new_pos = Position(self.pos.x + x, self.pos.y +",
"x, self.pos.y + y) if new_pos.y >= self.tile.height: raise ValueError('stepped out') self.pos =",
"class Map: def __init__(self, tile: Tile) -> None: self.tile = tile self.pos =",
"List[str]) -> \"Tile\": trees = [ [char == '#' for char in line]",
"1) * solve(5, 1) * solve(7, 1) * solve(1, 2) print(f'solution part 2:",
"= [ [char == '#' for char in line] for line in lines",
"from typing import List INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input() -> List[str]: with",
"tile self.pos = Position(0, 0) def step(self, x, y) -> None: new_pos =",
"+ y) if new_pos.y >= self.tile.height: raise ValueError('stepped out') self.pos = new_pos def",
"trees self.height = len(self.trees) self.width = len(self.trees[0]) Position = namedtuple('Position', ['x', 'y']) class",
"= len(self.trees) self.width = len(self.trees[0]) Position = namedtuple('Position', ['x', 'y']) class Map: def",
"None: self.tile = tile self.pos = Position(0, 0) def step(self, x, y) ->",
"line in lines ] return cls(trees) def __init__(self, trees: List[List[bool]]) -> None: self.trees",
"solve(3, 1) * solve(5, 1) * solve(7, 1) * solve(1, 2) print(f'solution part",
"-> None: new_pos = Position(self.pos.x + x, self.pos.y + y) if new_pos.y >=",
"Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True: try: map_.step(step_x, step_y) tree_count += 1 if",
"Position = namedtuple('Position', ['x', 'y']) class Map: def __init__(self, tile: Tile) -> None:",
"solution_part2 = solve(1, 1) * solve(3, 1) * solve(5, 1) * solve(7, 1)",
"bool: tile_pos = Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) ->",
"-> bool: tile_pos = Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y)",
"if __name__ == '__main__': solution_part1 = solve(3, 1) print(f'solution part 1: {solution_part1}') solution_part2",
"* solve(5, 1) * solve(7, 1) * solve(1, 2) print(f'solution part 2: {solution_part2}')",
"% self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input()))",
"cls(trees) def __init__(self, trees: List[List[bool]]) -> None: self.trees = trees self.height = len(self.trees)",
"from os import path from typing import List INPUT_FILE = path.join(path.dirname(__file__), 'input') def",
"for line in lines ] return cls(trees) def __init__(self, trees: List[List[bool]]) -> None:",
"in line] for line in lines ] return cls(trees) def __init__(self, trees: List[List[bool]])",
"-> None: self.tile = tile self.pos = Position(0, 0) def step(self, x, y)",
"path.join(path.dirname(__file__), 'input') def get_input() -> List[str]: with open(INPUT_FILE) as fh: return [line.rstrip('\\n') for",
"return tree_count if __name__ == '__main__': solution_part1 = solve(3, 1) print(f'solution part 1:",
"new_pos def pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x]",
"tile: Tile) -> None: self.tile = tile self.pos = Position(0, 0) def step(self,",
"self.pos.y + y) if new_pos.y >= self.tile.height: raise ValueError('stepped out') self.pos = new_pos",
"len(self.trees[0]) Position = namedtuple('Position', ['x', 'y']) class Map: def __init__(self, tile: Tile) ->",
"1 if map_.pos_has_tree() else 0 except ValueError: return tree_count if __name__ == '__main__':",
"] return cls(trees) def __init__(self, trees: List[List[bool]]) -> None: self.trees = trees self.height",
"None: new_pos = Position(self.pos.x + x, self.pos.y + y) if new_pos.y >= self.tile.height:",
"tree_count = 0 while True: try: map_.step(step_x, step_y) tree_count += 1 if map_.pos_has_tree()",
"= 0 while True: try: map_.step(step_x, step_y) tree_count += 1 if map_.pos_has_tree() else",
"lines: List[str]) -> \"Tile\": trees = [ [char == '#' for char in",
"return [line.rstrip('\\n') for line in fh.readlines()] class Tile: @classmethod def from_raw_lines(cls, lines: List[str])",
"Tile: @classmethod def from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees = [ [char ==",
"[line.rstrip('\\n') for line in fh.readlines()] class Tile: @classmethod def from_raw_lines(cls, lines: List[str]) ->",
"None: self.trees = trees self.height = len(self.trees) self.width = len(self.trees[0]) Position = namedtuple('Position',",
"'y']) class Map: def __init__(self, tile: Tile) -> None: self.tile = tile self.pos",
"get_input() -> List[str]: with open(INPUT_FILE) as fh: return [line.rstrip('\\n') for line in fh.readlines()]",
"1: {solution_part1}') solution_part2 = solve(1, 1) * solve(3, 1) * solve(5, 1) *",
"= new_pos def pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x % self.tile.width, self.pos.y) return",
"Tile) -> None: self.tile = tile self.pos = Position(0, 0) def step(self, x,",
"try: map_.step(step_x, step_y) tree_count += 1 if map_.pos_has_tree() else 0 except ValueError: return",
"self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count",
"line] for line in lines ] return cls(trees) def __init__(self, trees: List[List[bool]]) ->",
"def from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees = [ [char == '#' for",
"if new_pos.y >= self.tile.height: raise ValueError('stepped out') self.pos = new_pos def pos_has_tree(self) ->",
"[char == '#' for char in line] for line in lines ] return",
"return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0",
"tree_count if __name__ == '__main__': solution_part1 = solve(3, 1) print(f'solution part 1: {solution_part1}')",
"= len(self.trees[0]) Position = namedtuple('Position', ['x', 'y']) class Map: def __init__(self, tile: Tile)",
"= Position(self.pos.x + x, self.pos.y + y) if new_pos.y >= self.tile.height: raise ValueError('stepped",
"= Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True: try: map_.step(step_x, step_y) tree_count += 1",
"= solve(3, 1) print(f'solution part 1: {solution_part1}') solution_part2 = solve(1, 1) * solve(3,",
"@classmethod def from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees = [ [char == '#'",
"y) if new_pos.y >= self.tile.height: raise ValueError('stepped out') self.pos = new_pos def pos_has_tree(self)",
"return cls(trees) def __init__(self, trees: List[List[bool]]) -> None: self.trees = trees self.height =",
"= solve(1, 1) * solve(3, 1) * solve(5, 1) * solve(7, 1) *",
"= Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int: map_",
"self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count =",
"collections import namedtuple from os import path from typing import List INPUT_FILE =",
"'input') def get_input() -> List[str]: with open(INPUT_FILE) as fh: return [line.rstrip('\\n') for line",
"len(self.trees) self.width = len(self.trees[0]) Position = namedtuple('Position', ['x', 'y']) class Map: def __init__(self,",
"if map_.pos_has_tree() else 0 except ValueError: return tree_count if __name__ == '__main__': solution_part1",
"== '#' for char in line] for line in lines ] return cls(trees)",
"List INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input() -> List[str]: with open(INPUT_FILE) as fh:",
"except ValueError: return tree_count if __name__ == '__main__': solution_part1 = solve(3, 1) print(f'solution",
"self.pos = Position(0, 0) def step(self, x, y) -> None: new_pos = Position(self.pos.x",
"in fh.readlines()] class Tile: @classmethod def from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees =",
"List[str]: with open(INPUT_FILE) as fh: return [line.rstrip('\\n') for line in fh.readlines()] class Tile:",
"line in fh.readlines()] class Tile: @classmethod def from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees",
"-> \"Tile\": trees = [ [char == '#' for char in line] for",
"step(self, x, y) -> None: new_pos = Position(self.pos.x + x, self.pos.y + y)",
"Map: def __init__(self, tile: Tile) -> None: self.tile = tile self.pos = Position(0,",
"path from typing import List INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input() -> List[str]:",
"tile_pos = Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int:",
"{solution_part1}') solution_part2 = solve(1, 1) * solve(3, 1) * solve(5, 1) * solve(7,",
"1) * solve(3, 1) * solve(5, 1) * solve(7, 1) * solve(1, 2)",
"class Tile: @classmethod def from_raw_lines(cls, lines: List[str]) -> \"Tile\": trees = [ [char",
"Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x, step_y) -> int: map_ =",
"def get_input() -> List[str]: with open(INPUT_FILE) as fh: return [line.rstrip('\\n') for line in",
"step_y) -> int: map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True: try: map_.step(step_x,",
"map_ = Map(Tile.from_raw_lines(get_input())) tree_count = 0 while True: try: map_.step(step_x, step_y) tree_count +=",
"with open(INPUT_FILE) as fh: return [line.rstrip('\\n') for line in fh.readlines()] class Tile: @classmethod",
"pos_has_tree(self) -> bool: tile_pos = Position(self.pos.x % self.tile.width, self.pos.y) return self.tile.trees[tile_pos.y][tile_pos.x] def solve(step_x,",
"fh: return [line.rstrip('\\n') for line in fh.readlines()] class Tile: @classmethod def from_raw_lines(cls, lines:",
"solve(3, 1) print(f'solution part 1: {solution_part1}') solution_part2 = solve(1, 1) * solve(3, 1)",
"part 1: {solution_part1}') solution_part2 = solve(1, 1) * solve(3, 1) * solve(5, 1)",
"import path from typing import List INPUT_FILE = path.join(path.dirname(__file__), 'input') def get_input() ->",
"List[List[bool]]) -> None: self.trees = trees self.height = len(self.trees) self.width = len(self.trees[0]) Position",
"= tile self.pos = Position(0, 0) def step(self, x, y) -> None: new_pos",
"def __init__(self, trees: List[List[bool]]) -> None: self.trees = trees self.height = len(self.trees) self.width",
"solution_part1 = solve(3, 1) print(f'solution part 1: {solution_part1}') solution_part2 = solve(1, 1) *"
"# Annotate annovar formatted files with given databases for annovar_file in os.listdir(annovar_file_dir): base_name",
"annovar formatted files with given databases for annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0]",
"'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring . -csvout ' \\ '-polish'.format(table_annovar, full_annov_file, annotated_vcf_file)",
"args = parser.parse_args() merged = args.merged humanonly = args.humanonly if merged: vcf_file_dir =",
"'--merged', action='store_true', help='use directory for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory",
"annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar",
"= annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file):",
"vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if",
"directory for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory for merged VCFs')",
"= os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir",
"{} -buildver hg19'.format(table_annovar, humandb_dir) # Convert to annovar format for vcf_file in os.listdir(vcf_file_dir):",
"annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command = 'perl {} {}",
"os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir =",
"= '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar =",
"# Convert to annovar format for vcf_file in os.listdir(vcf_file_dir): if '.idx' not in",
"os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir =",
"= 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir,",
"= 'perl {} -format vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl {} {} -buildver",
"import argparse import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory for",
"'table_annovar.pl') conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl {}",
"'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {}",
"merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs')",
"scripts/annotate_variants.py Use ANNOVAR to first convert a sample into annovar format and then",
"= os.path.join('results', 'annotated_vcfs') if humanonly: human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir",
"-buildver hg19 ' \\ '-out {} -verbose -otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a",
"'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string)",
"os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl",
"if not os.path.isfile(output_vcf_file): file_command = '{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True)",
"vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command",
"'{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules',",
"-filter pass'.format(convert_annovar) anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir) # Convert to",
"os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command = 'perl {}",
"= vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file):",
"{} modules/annovar/humandb -buildver hg19 ' \\ '-out {} -verbose -otherinfo -remove -protocol '",
"humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com",
"annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir,",
"= os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com =",
"= os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly:",
"'--humanonly', action='store_true', help='use humanonly directory for merged VCFs') args = parser.parse_args() merged =",
"os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command = 'perl {} {} modules/annovar/humandb -buildver hg19",
"first convert a sample into annovar format and then annotate \"\"\" import os",
"parser.parse_args() merged = args.merged humanonly = args.humanonly if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf')",
"full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar formatted files with given databases for",
"annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs')",
"= os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command = '{}",
"for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory for merged VCFs') args",
"\"\"\" <NAME> 2017 scripts/annotate_variants.py Use ANNOVAR to first convert a sample into annovar",
"parser.add_argument('-m', '--merged', action='store_true', help='use directory for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly",
"annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly: human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string)",
"vcf_file in os.listdir(vcf_file_dir): if '.idx' not in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file =",
"os.path.join('results', 'annotated_vcfs') if humanonly: human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir =",
"= os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {} -format vcf4",
"vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir) # Convert",
"\\ '-operation g,f,f,f -nastring . -csvout ' \\ '-polish'.format(table_annovar, full_annov_file, annotated_vcf_file) subprocess.call(file_command, shell=True)",
"os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {} -format vcf4 -filter",
"os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command = '{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file)",
"' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring . -csvout ' \\ '-polish'.format(table_annovar,",
"= os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir",
"humanonly directory for merged VCFs') args = parser.parse_args() merged = args.merged humanonly =",
"'{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir,",
"to annovar format for vcf_file in os.listdir(vcf_file_dir): if '.idx' not in vcf_file: base_name",
"{} {} -buildver hg19'.format(table_annovar, humandb_dir) # Convert to annovar format for vcf_file in",
"= '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir =",
"Convert to annovar format for vcf_file in os.listdir(vcf_file_dir): if '.idx' not in vcf_file:",
"VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory for merged VCFs') args = parser.parse_args()",
"else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs')",
"humanonly = args.humanonly if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs')",
"annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir",
"hg19 ' \\ '-out {} -verbose -otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a '",
"argparse import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory for merged",
"os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir,",
"= os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly: human_string = 'humanonly' vcf_file_dir",
"os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly: human_string = 'humanonly' vcf_file_dir =",
"human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/')",
"'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl')",
"sample into annovar format and then annotate \"\"\" import os import argparse import",
"output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar formatted files with given databases for annovar_file",
"format and then annotate \"\"\" import os import argparse import subprocess parser =",
"import os import argparse import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use",
"action='store_true', help='use directory for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory for",
"databases for annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file",
"Use ANNOVAR to first convert a sample into annovar format and then annotate",
"os.path.isfile(annotated_vcf_file): file_command = 'perl {} {} modules/annovar/humandb -buildver hg19 ' \\ '-out {}",
"if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results',",
"format for vcf_file in os.listdir(vcf_file_dir): if '.idx' not in vcf_file: base_name = vcf_file.split('.')[0]",
"os.listdir(vcf_file_dir): if '.idx' not in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file)",
"annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command = 'perl {} {} modules/annovar/humandb",
"vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command = '{} {} >",
"conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl {} {}",
"{} {} modules/annovar/humandb -buildver hg19 ' \\ '-out {} -verbose -otherinfo -remove -protocol",
"formatted files with given databases for annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file",
"given databases for annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file)",
"= os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar) anno_com =",
"human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir =",
"hg19'.format(table_annovar, humandb_dir) # Convert to annovar format for vcf_file in os.listdir(vcf_file_dir): if '.idx'",
"file_command = 'perl {} {} modules/annovar/humandb -buildver hg19 ' \\ '-out {} -verbose",
"for merged VCFs') args = parser.parse_args() merged = args.merged humanonly = args.humanonly if",
"args.merged humanonly = args.humanonly if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results',",
"'.idx' not in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file =",
"annotate \"\"\" import os import argparse import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged',",
"' \\ '-out {} -verbose -otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\",
"{} -format vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir)",
"= '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir =",
"'perl {} -format vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl {} {} -buildver hg19'.format(table_annovar,",
"= args.merged humanonly = args.humanonly if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir =",
"parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory for merged VCFs') parser.add_argument('-g', '--humanonly',",
"= os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command = 'perl",
"vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else:",
"'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results',",
"'annotated_vcfs') if humanonly: human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir,",
"os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if",
"humandb_dir) # Convert to annovar format for vcf_file in os.listdir(vcf_file_dir): if '.idx' not",
"then annotate \"\"\" import os import argparse import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m',",
"'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results',",
"= args.humanonly if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir",
"subprocess.call(file_command, shell=True) # Annotate annovar formatted files with given databases for annovar_file in",
"import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory for merged VCFs')",
"= 'perl {} {} modules/annovar/humandb -buildver hg19 ' \\ '-out {} -verbose -otherinfo",
"= parser.parse_args() merged = args.merged humanonly = args.humanonly if merged: vcf_file_dir = os.path.join('processed',",
"for vcf_file in os.listdir(vcf_file_dir): if '.idx' not in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file",
"if not os.path.isfile(annotated_vcf_file): file_command = 'perl {} {} modules/annovar/humandb -buildver hg19 ' \\",
"directory for merged VCFs') args = parser.parse_args() merged = args.merged humanonly = args.humanonly",
"annovar format and then annotate \"\"\" import os import argparse import subprocess parser",
"to first convert a sample into annovar format and then annotate \"\"\" import",
"'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly: human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir,",
"{}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar formatted files with given databases",
"convert a sample into annovar format and then annotate \"\"\" import os import",
"in os.listdir(vcf_file_dir): if '.idx' not in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir,",
"{} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar formatted files with",
"annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs')",
"> {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar formatted files with given",
"-protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring . -csvout ' \\",
"'perl {} {} modules/annovar/humandb -buildver hg19 ' \\ '-out {} -verbose -otherinfo -remove",
"base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not",
"full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command =",
"\\ '-out {} -verbose -otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation",
"human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar')",
"'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar)",
"'-out {} -verbose -otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f",
"merged VCFs') args = parser.parse_args() merged = args.merged humanonly = args.humanonly if merged:",
"2017 scripts/annotate_variants.py Use ANNOVAR to first convert a sample into annovar format and",
"= '{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar formatted",
"os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir =",
"not in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir,",
"shell=True) # Annotate annovar formatted files with given databases for annovar_file in os.listdir(annovar_file_dir):",
"in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name))",
"for annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file =",
"full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command =",
"a sample into annovar format and then annotate \"\"\" import os import argparse",
"convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {} -format",
"= os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command = 'perl {} {} modules/annovar/humandb -buildver",
"os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly: human_string",
"argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use",
"-verbose -otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring .",
"' \\ '-operation g,f,f,f -nastring . -csvout ' \\ '-polish'.format(table_annovar, full_annov_file, annotated_vcf_file) subprocess.call(file_command,",
"-format vcf4 -filter pass'.format(convert_annovar) anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir) #",
"into annovar format and then annotate \"\"\" import os import argparse import subprocess",
"output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command = '{} {} > {}'.format(conv_com,",
"base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not",
"if humanonly: human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string)",
"parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory for merged VCFs') args = parser.parse_args() merged",
"-remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring . -csvout '",
"'annovar_merged_vcfs') annotated_file_dir = os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results',",
"= os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar =",
"merged = args.merged humanonly = args.humanonly if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir",
"annovar format for vcf_file in os.listdir(vcf_file_dir): if '.idx' not in vcf_file: base_name =",
"annovar_dir = os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar",
"ANNOVAR to first convert a sample into annovar format and then annotate \"\"\"",
"file_command = '{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar",
"help='use directory for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory for merged",
"subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory for merged VCFs') parser.add_argument('-g',",
"VCFs') args = parser.parse_args() merged = args.merged humanonly = args.humanonly if merged: vcf_file_dir",
"vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if",
"vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir = '{}_{}'.format(annotated_file_dir, human_string) annovar_dir",
"<NAME> 2017 scripts/annotate_variants.py Use ANNOVAR to first convert a sample into annovar format",
"help='use humanonly directory for merged VCFs') args = parser.parse_args() merged = args.merged humanonly",
"= 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir) # Convert to annovar format for",
"anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir) # Convert to annovar format",
"'{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command = 'perl {} {} modules/annovar/humandb -buildver hg19 '",
"'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly: human_string =",
"pass'.format(convert_annovar) anno_com = 'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir) # Convert to annovar",
"'{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command = '{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command,",
"table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl {} -format vcf4 -filter pass'.format(convert_annovar) anno_com",
"if '.idx' not in vcf_file: base_name = vcf_file.split('.')[0] full_vcf_file = os.path.join(vcf_file_dir, vcf_file) output_vcf_file",
"and then annotate \"\"\" import os import argparse import subprocess parser = argparse.ArgumentParser()",
"annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name)) if not os.path.isfile(annotated_vcf_file): file_command",
"-otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring . -csvout",
"= os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command = '{} {} > {}'.format(conv_com, full_vcf_file,",
"os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl') table_annovar = os.path.join(annovar_dir, 'table_annovar.pl') conv_com = 'perl",
"\"\"\" import os import argparse import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true',",
"humanonly: human_string = 'humanonly' vcf_file_dir = '{}_{}'.format(vcf_file_dir, human_string) annovar_file_dir = '{}_{}'.format(annovar_file_dir, human_string) annotated_file_dir",
"os import argparse import subprocess parser = argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory",
"= argparse.ArgumentParser() parser.add_argument('-m', '--merged', action='store_true', help='use directory for merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true',",
"action='store_true', help='use humanonly directory for merged VCFs') args = parser.parse_args() merged = args.merged",
"files with given databases for annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file =",
"not os.path.isfile(annotated_vcf_file): file_command = 'perl {} {} modules/annovar/humandb -buildver hg19 ' \\ '-out",
"args.humanonly if merged: vcf_file_dir = os.path.join('processed', 'gatk_merged_vcf') annovar_file_dir = os.path.join('results', 'annovar_merged_vcfs') annotated_file_dir =",
"annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir = os.path.join('results', 'annotated_vcfs') if humanonly: human_string = 'humanonly'",
"'{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate annovar formatted files",
"os.path.isfile(output_vcf_file): file_command = '{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) # Annotate",
"modules/annovar/humandb -buildver hg19 ' \\ '-out {} -verbose -otherinfo -remove -protocol ' \\",
"-buildver hg19'.format(table_annovar, humandb_dir) # Convert to annovar format for vcf_file in os.listdir(vcf_file_dir): if",
"not os.path.isfile(output_vcf_file): file_command = '{} {} > {}'.format(conv_com, full_vcf_file, output_vcf_file) subprocess.call(file_command, shell=True) #",
"merged VCFs') parser.add_argument('-g', '--humanonly', action='store_true', help='use humanonly directory for merged VCFs') args =",
"with given databases for annovar_file in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir,",
"Annotate annovar formatted files with given databases for annovar_file in os.listdir(annovar_file_dir): base_name =",
"human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir, 'convert2annovar.pl')",
"in os.listdir(annovar_file_dir): base_name = annovar_file.split('.')[0] full_annov_file = os.path.join(annovar_file_dir, annovar_file) annotated_vcf_file = os.path.join(annotated_file_dir, '{}.annotated'.format(base_name))",
"{} -verbose -otherinfo -remove -protocol ' \\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring",
"= os.path.join('results', 'annotated_merged_vcfs') else: vcf_file_dir = os.path.join('results', 'gatk_vcfs') annovar_file_dir = os.path.join('results', 'annovar_vcfs') annotated_file_dir",
"\\ 'refGene,cosmic70,gnomad_exome,dbnsfp30a ' \\ '-operation g,f,f,f -nastring . -csvout ' \\ '-polish'.format(table_annovar, full_annov_file,",
"os.path.join(vcf_file_dir, vcf_file) output_vcf_file = os.path.join(annovar_file_dir, '{}.annovar.vcf'.format(base_name)) if not os.path.isfile(output_vcf_file): file_command = '{} {}",
"'{}_{}'.format(annotated_file_dir, human_string) annovar_dir = os.path.join('modules', 'annovar') humandb_dir = os.path.join(annovar_dir, 'humandb/') convert_annovar = os.path.join(annovar_dir,",
"'perl {} {} -buildver hg19'.format(table_annovar, humandb_dir) # Convert to annovar format for vcf_file"
] |
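The conversion step above builds a shell string with an output redirect and runs it via subprocess.call(..., shell=True). As a hedged illustration only (not part of the original script), the same convert2annovar.pl call could be expressed as an argument list, which sidesteps shell quoting; the helper name convert_vcf is hypothetical.

# Hypothetical helper (illustration, not from the original script): run the
# same convert2annovar.pl command without shell=True by writing its stdout
# to the target file ourselves.
import subprocess

def convert_vcf(convert_annovar, full_vcf_file, output_vcf_file):
    with open(output_vcf_file, 'w') as out_handle:
        subprocess.call(
            ['perl', convert_annovar, '-format', 'vcf4', '-filter', 'pass',
             full_vcf_file],
            stdout=out_handle)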
def get_max_profit(stock_prices):
    if len(stock_prices) < 2:
        raise ValueError('Getting a profit requires at least 2 prices')

    # We'll greedily update min_price and max_profit, so we initialize
    # them to the first price and the first possible profit
    min_price = stock_prices[0]
    max_profit = stock_prices[1] - stock_prices[0]

    # Start at the second (index 1) time
    # We can't sell at the first time, since we must buy first,
    # and we can't buy and sell at the same time!
    # If we started at index 0, we'd try to buy *and* sell at time 0.
    # This would give a profit of 0, which is a problem if our
    # max_profit is supposed to be *negative*--we'd return 0.
    for current_time in range(1, len(stock_prices)):
        current_price = stock_prices[current_time]

        # See what our profit would be if we bought at the
        # min price and sold at the current price
        potential_profit = current_price - min_price

        # Update max_profit if we can do better
        max_profit = max(max_profit, potential_profit)

        # Update min_price so it's always
        # the lowest price we've seen so far
        min_price = min(min_price, current_price)

    return max_profit
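A few quick sanity checks for get_max_profit (the price series here are chosen for illustration and are not from the original):

# Example values for illustration only
assert get_max_profit([10, 7, 5, 8, 11, 9]) == 6   # buy at 5, sell at 11
assert get_max_profit([8, 6, 4, 3]) == -1          # prices only fall: least-bad trade is 4 -> 3
assert get_max_profit([1, 6, 7, 9]) == 8           # buy at 1, sell at 9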
[
"\"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\",",
") from .signals import meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\",",
"\"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\", \"direction_code_str\", \"MetadataLoader\", \"SidecarMetadataLoader\",",
") from .index import BidsDatabase, ExcludeDatabase, Database from .metadata import ( canonicalize_direction_code, direction_code_str,",
"from .index import BidsDatabase, ExcludeDatabase, Database from .metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader,",
"py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et:",
"\"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\",",
"MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals import meansignals __all__ = [ \"DictListFile\",",
"import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals import meansignals",
"uncacheobj, ) from .parse import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from .index",
"python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4",
"slice_timing_str, str_slice_timing, ) from .signals import meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\",",
"et: from .file import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, )",
"ExcludeDatabase, Database from .metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, )",
"-*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .file import (",
"sts=4 ts=4 sw=4 et: from .file import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath,",
"nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .file import",
"loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from .parse import ( parse_condition_file, parse_design, loadspreadsheet,",
"AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from .parse import ( parse_condition_file, parse_design,",
"emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python",
"ft=python sts=4 ts=4 sw=4 et: from .file import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma,",
"meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\",",
"\"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\", \"direction_code_str\",",
"loadmatrix, ) from .index import BidsDatabase, ExcludeDatabase, Database from .metadata import ( canonicalize_direction_code,",
"import BidsDatabase, ExcludeDatabase, Database from .metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str,",
".metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals import",
"__all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\",",
".parse import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from .index import BidsDatabase, ExcludeDatabase,",
"\"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\",",
"direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals import meansignals __all__ = [",
"import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from .index import BidsDatabase, ExcludeDatabase, Database",
".index import BidsDatabase, ExcludeDatabase, Database from .metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader,",
"vi: set ft=python sts=4 ts=4 sw=4 et: from .file import ( DictListFile, AdaptiveLock,",
"import meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\",",
"utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- #",
"BidsDatabase, ExcludeDatabase, Database from .metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing,",
"make_cachefilepath, cacheobj, uncacheobj, ) from .parse import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, )",
"str_slice_timing, ) from .signals import meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\",",
"parse_design, loadspreadsheet, loadmatrix, ) from .index import BidsDatabase, ExcludeDatabase, Database from .metadata import",
"dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from .parse import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix,",
"( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from .index import BidsDatabase, ExcludeDatabase, Database from",
"Database from .metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from",
"set ft=python sts=4 ts=4 sw=4 et: from .file import ( DictListFile, AdaptiveLock, loadpicklelzma,",
"\"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\", \"direction_code_str\", \"MetadataLoader\",",
"-*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4",
"mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4",
"[ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\",",
"ts=4 sw=4 et: from .file import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj,",
"DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from .parse import ( parse_condition_file,",
"4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from",
"import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from .parse import",
"( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals import meansignals __all__",
"-*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil",
"parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from .index import BidsDatabase, ExcludeDatabase, Database from .metadata",
") from .parse import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from .index import",
"# vi: set ft=python sts=4 ts=4 sw=4 et: from .file import ( DictListFile,",
"( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from .parse import (",
"canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals import meansignals __all__ =",
"# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode:",
"\"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\", \"direction_code_str\", \"MetadataLoader\", \"SidecarMetadataLoader\", \"slice_timing_str\", \"str_slice_timing\", \"meansignals\",",
"cacheobj, uncacheobj, ) from .parse import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from",
"\"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\",",
"\"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\", \"direction_code_str\", \"MetadataLoader\", \"SidecarMetadataLoader\", \"slice_timing_str\", \"str_slice_timing\",",
".file import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from .parse",
"= [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\", \"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\",",
".signals import meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\", \"loadmatrix\",",
"from .parse import ( parse_condition_file, parse_design, loadspreadsheet, loadmatrix, ) from .index import BidsDatabase,",
"from .file import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj, ) from",
"sw=4 et: from .file import ( DictListFile, AdaptiveLock, loadpicklelzma, dumppicklelzma, make_cachefilepath, cacheobj, uncacheobj,",
"from .signals import meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\", \"IndexedFile\", \"parse_condition_file\", \"parse_design\", \"loadspreadsheet\",",
"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set",
"indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from .file",
"loadspreadsheet, loadmatrix, ) from .index import BidsDatabase, ExcludeDatabase, Database from .metadata import (",
"from .metadata import ( canonicalize_direction_code, direction_code_str, MetadataLoader, SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals",
"\"loadpicklelzma\", \"dumppicklelzma\", \"make_cachefilepath\", \"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\", \"direction_code_str\", \"MetadataLoader\", \"SidecarMetadataLoader\", \"slice_timing_str\",",
"-*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi:",
"\"cacheobj\", \"uncacheobj\", \"BidsDatabase\", \"ExcludeDatabase\", \"Database\", \"canonicalize_direction_code\", \"direction_code_str\", \"MetadataLoader\", \"SidecarMetadataLoader\", \"slice_timing_str\", \"str_slice_timing\", \"meansignals\", ]",
"coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-",
"SidecarMetadataLoader, slice_timing_str, str_slice_timing, ) from .signals import meansignals __all__ = [ \"DictListFile\", \"AdaptiveLock\","
] |
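This __init__.py flattens the package namespace: submodule names are re-exported at the top level, and __all__ pins down what a star-import exposes. A minimal consumer sketch, assuming the package imports as mypkg (the real package name is not shown in this file):

# Hypothetical usage; 'mypkg' stands in for the real package name.
from mypkg import Database, parse_condition_file  # re-exported at the top level

# A star-import pulls in exactly the names listed in __all__:
from mypkg import *  # DictListFile, AdaptiveLock, IndexedFile, ...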
import numpy as np

# TODO:
# 1. create a streamlined and replicable gif creation set of functions in this file.
# 2. implement these functions into the generation algorithms available.


def convert_2d(index, cols):
    return (index // cols, index % cols)


def bounds_check(index, rows, cols):
    if index[0] < 0 or index[0] > rows - 1:
        return True
    if index[1] < 0 or index[1] > cols - 1:
        return True
    return False


def neighborCheck(grid, curr, rows, cols):
    # order: Left, Right, Top, Down
    ops = [(0, -1), (0, 1), (-1, 0), (1, 0)]  # short for operations
    ret = []
    for i in range(4):
        # bounds checking
        x = curr.index[1] + ops[i][1]
        y = curr.index[0] + ops[i][0]
        if bounds_check((y, x), rows, cols):
            continue
        if grid[y][x].visited == False:
            if curr.walls[i] != 'X':
                ret.append(i)
    return ret


def nbr_index(index, dir):
    if dir == 'L':
        return (index[0], index[1] - 1)
    elif dir == 'R':
        return (index[0], index[1] + 1)
    elif dir == 'T':
        return (index[0] - 1, index[1])
    return (index[0] + 1, index[1])


def conv_nbr_wall(dir):
    if dir == 'L':
        return 1
    elif dir == 'R':
        return 0
    elif dir == 'T':
        return 2
    return 3


def conv_idx_dir(index, nbr_index):
    y = index[0] - nbr_index[0]
    x = index[1] - nbr_index[1]
    if x == 1:
        return 'R'
    if x == -1:
        return 'L'
    if y == 1:
        return 'T'
    if y == -1:
        return 'D'


def print_grid(grid):
    for i in range(len(grid)):
        print("[", end="")
        for j in range(len(grid[i])):
            print(grid[i][j].walls, end=", ")
        print("]")


def print_index(grid):
    for i in range(len(grid)):
        print("[", end="")
        for j in range(len(grid[i])):
            print(grid[i][j].index, end=", ")
        print("]")


def print_visited(grid):
    for i in range(len(grid)):
        print("[", end="")
        for j in range(len(grid[i])):
            if grid[i][j].visited == True:
                print('X', end=", ")
            else:
                print('O', end=", ")
        print("]")


def maze_index(index, dir):
    if dir == 0:
        return (index[0], index[1] - 1)
    elif dir == 1:
        return (index[0], index[1] + 1)
    elif dir == 2:
        return (index[0] - 1, index[1])
    return (index[0] + 1, index[1])


def create_snapshot(new_image, index, direction, color=None):
    # default to 255 (white) if none provided
    if color == None:
        color = 255
    # assign the given color to the cell to mark it as active
    new_image[index[0], index[1]] = color
    if direction < 0:
        return new_image
    # find the index of the wall to remove
    mark_as_white = maze_index(index, direction)
    # remove the wall (set it to the provided color)
    new_image[mark_as_white[0], mark_as_white[1]] = color
    return new_image


def grid_to_image(index):
    return (index[0] * 2 + 1, index[1] * 2 + 1)


def mark_change(idx, gif_arr, wall_idx, secondIdx=None, color=None):
    # mark one or two changes, algorithm specific
    if secondIdx == None:
        newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)
    else:
        newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)
        newIMG = create_snapshot(newIMG, secondIdx, -1, color)
    if not np.array_equal(newIMG, gif_arr[-1]):
        gif_arr.append(newIMG)


def mark_node(idx, gif_arr, secondIdx=None, color=None):
    if secondIdx == None:
        newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color)
    else:
        newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color)
        newIMG = create_snapshot(newIMG, secondIdx, -1, color)
    if not np.array_equal(newIMG, gif_arr[-1]):
        gif_arr.append(newIMG)


def getNeighbor(grid, curr, rows, cols, previous):
    # order: Left, Right, Top, Down
    ops = [(0, -1), (0, 1), (-1, 0), (1, 0)]  # short for operations
    ret = []
    for i in range(4):
        # bounds checking
        x = curr.index[1] + ops[i][1]
        y = curr.index[0] + ops[i][0]
        if bounds_check((y, x), rows, cols) or (y, x) == previous.index:
            continue
        ret.append(grid[y][x])
    return ret


def print_maze(grid):
    maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2 + 1))
    maze[:, :] = '@'
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            for k in range(4):
                idx = maze_index((i * 2 + 1, j * 2 + 1), k)
                maze[i * 2 + 1, j * 2 + 1] = '+'
                if grid[i][j].walls[k] == 'X':
                    if k == 0 or k == 1:
                        maze[idx[0], idx[1]] = '-'
                    else:
                        maze[idx[0], idx[1]] = '|'
    for i in range(maze.shape[0]):
        for j in range(maze.shape[1]):
            print(maze[i, j].decode('utf-8'), end=" ")
        print()


def countNeighbors(grid, index, rows, cols):
    # order: Left, Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom Right
    ops = [(0, -1), (0, 1), (-1, 0), (1, 0),
           (-1, -1), (1, -1), (-1, 1), (1, 1)]  # short for operations
    count = 0
    for i in range(8):
        # bounds checking
        x = index[1] + ops[i][1]
        y = index[0] + ops[i][0]
        if bounds_check((y, x), rows, cols):
            continue
        if grid[y, x] == 255:
            count += 1
    return count


def checkRules(grid, index, rule):
    c = countNeighbors(grid, index, grid.shape[0], grid.shape[1])
    for character in rule:
        if c == int(character):
            return True
    return False


def start_cells(grid, y, x, random, visited, unvisited):
    ops = [(0, -1), (0, 1), (-1, 0), (1, 0),
           (-1, -1), (1, -1), (-1, 1), (1, 1)]
    dirs = random.sample(ops, k=len(ops))
    count = 0
    for index in dirs:
        if count == len(dirs):
            break
        if not bounds_check((y + index[0], x + index[1]),
                            grid.shape[0], grid.shape[1]):
            if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or \
                    x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]:
                continue
            grid[y + index[0], x + index[1]] = 255
            visited.add((y + index[0], x + index[1]))
            update_set(y + index[0], x + index[1], visited, grid, unvisited)
            count += 1
    if count == 0:
        return False
    return True


def check_visited(y, x, visited):
    ops = [(0, -1), (0, 1), (-1, 0), (1, 0),
           (-1, -1), (1, -1), (-1, 1), (1, 1)]
    for index in ops:
        if (y + index[0], x + index[1]) in visited:
            return True
    return False


def update_set(y, x, all_nodes, grid, unvisited):
    ops = [(0, -1), (0, 1), (-1, 0), (1, 0),
           (-1, -1), (1, -1), (-1, 1), (1, 1)]
    for index in ops:
        if y + index[0] == 0 or grid.shape[0] - 1 == y + index[0] or \
                x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]:
            continue
        all_nodes.add((y, x))
        if (y, x) in unvisited:
            unvisited.remove((y, x))
"wall_idx, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) newIMG = create_snapshot(newIMG, secondIdx,",
"in range(8): #bounds checking x = index[1] + ops[i][1] y = index[0] +",
"* 2 + 1), k) maze[i * 2 + 1, j * 2",
"new_image[index[0], index[1]] = color if direction < 0: return new_image # find the",
"index[0] == 0 or grid.shape[0] - 1 == y + index[0] or x",
"ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] dirs = random.sample(ops,",
"index, direction, color=None): # set marking color to 255 (white) if none provided",
"(1,0)] #short for operations ret = [] for i in range(4): #bounds checking",
"+ ops[i][1] y = curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols) or (y,x)",
"create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) newIMG =",
"it as active new_image[index[0], index[1]] = color if direction < 0: return new_image",
"index[1]] = 255 visited.add((y + index[0], x + index[1])) update_set(y + index[0], x",
"1, index[1] * 2 + 1) def mark_change(idx, gif_arr, wall_idx, secondIdx = None,",
"in visited: return True return False def update_set(y, x, all_nodes, grid, unvisited): ops",
"y = index[0] - nbr_index[0] x = index[1] - nbr_index[1] if x ==",
"1, index[1]) def create_snapshot(new_image, index, direction, color=None): # set marking color to 255",
"if dir == 'L': return 1 elif dir == 'R': return 0 elif",
"index[1]) def create_snapshot(new_image, index, direction, color=None): # set marking color to 255 (white)",
"-1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None,",
"== 'X': if k == 0 or k == 1: maze[idx[0], idx[1]] =",
"255: count += 1 return count def checkRules(grid, index, rule): c = countNeighbors(grid,",
"continue if grid[y,x] == 255: count += 1 return count def checkRules(grid, index,",
"idx[1]] = '-' else: maze[idx[0], idx[1]] = '|' for i in range(maze.shape[0]): for",
"creation set of functions in this file. #2. implement these functions into the",
"specific if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else: newIMG",
"y == -1: return 'D' def print_grid(grid): for i in range(len(grid)): print(\"[\", end=\"\")",
"#short for operations ret = [] for i in range(4): #bounds checking x",
"rule): c = countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for character in rule: if c",
"def print_grid(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].walls,",
"return ret def print_maze(grid): maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) *",
"j in range(len(grid[i])): for k in range(4): idx = maze_index((i * 2 +",
"= index[1] + ops[i][1] y = index[0] + ops[i][0] if bounds_check((y,x), rows, cols):",
"True return False def neighborCheck(grid, curr, rows, cols): #order: Left, Right, Top, Down",
"grid[y + index[0], x + index[1]] = 255 visited.add((y + index[0], x +",
"0 for index in dirs: if count == len(dirs): break if not bounds_check((y",
"y + index[0] == 0 or grid.shape[0] - 1 == y + index[0]",
"x == 1: return 'R' if x == -1: return 'L' if y",
"Top, Down ops = [(0,-1), (0,1), (-1,0), (1,0)] #short for operations ret =",
"to mark it as active new_image[index[0], index[1]] = color if direction < 0:",
"+= 1 return count def checkRules(grid, index, rule): c = countNeighbors(grid, index, grid.shape[0],",
"255 (white) if none provided if color == None: color = 255 #",
"cols): continue if grid[y][x].visited == False: if curr.walls[i] != 'X': ret.append(i) return ret",
"color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None, color",
"== y + index[0] or x + index[1] == 0 or grid.shape[1] -",
"mark_as_white = maze_index(index, direction) # remove the wall (set it to the provided",
"return ret def nbr_index(index, dir): if dir == 'L': return (index[0], index[1] -",
"index[0] - nbr_index[0] x = index[1] - nbr_index[1] if x == 1: return",
"print(grid[i][j].index, end=\", \") print(\"]\") def print_visited(grid): for i in range(len(grid)): print(\"[\", end=\"\") for",
"def maze_index(index, dir): if dir == 0: return (index[0], index[1] - 1) elif",
"if y + index[0] == 0 or grid.shape[0] - 1 == y +",
"(index[0] - 1, index[1]) return (index[0] + 1, index[1]) def conv_nbr_wall(dir): if dir",
"1, index[1]) return (index[0] + 1, index[1]) def create_snapshot(new_image, index, direction, color=None): #",
"to break remove mark_as_white = maze_index(index, direction) # remove the wall (set it",
"k) maze[i * 2 + 1, j * 2 + 1] = '+'",
"if bounds_check((y,x), rows, cols): continue if grid[y][x].visited == False: if curr.walls[i] != 'X':",
"in range(len(grid)): for j in range(len(grid[i])): for k in range(4): idx = maze_index((i",
"if none provided if color == None: color = 255 # assign the",
"j in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid): for i in range(len(grid)):",
"i in range(maze.shape[0]): for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\" \") print() def countNeighbors(grid,",
"create a streamlined and replicable gif creation set of functions in this file.",
"range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\" \") print() def countNeighbors(grid, index, rows, cols): #order: Left, Right,",
"- 1) elif dir == 1: return (index[0], index[1] + 1) elif dir",
"gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None, color = None): if secondIdx",
"-1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) newIMG = create_snapshot(newIMG, secondIdx,",
"y = curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols) or (y,x) == previous.index:",
"x = index[1] + ops[i][1] y = index[0] + ops[i][0] if bounds_check((y,x), rows,",
"= countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for character in rule: if c == int(character):",
"ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y][x].visited == False: if curr.walls[i] !=",
"i in range(8): #bounds checking x = index[1] + ops[i][1] y = index[0]",
"Down ops = [(0,-1), (0,1), (-1,0), (1,0)] #short for operations ret = []",
"index[1] == 0 or grid.shape[1] - 1 == x + index[1]: continue all_nodes.add((y,x))",
"into the generation algorithms available. def convert_2d(index, cols): return (index // cols, index",
"elif dir == 'T': return 2 return 3 def conv_idx_dir(index, nbr_index): y =",
"== 0: return (index[0], index[1] - 1) elif dir == 1: return (index[0],",
"color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) newIMG = create_snapshot(newIMG, secondIdx, -1,",
"def countNeighbors(grid, index, rows, cols): #order: Left, Right, Top, Down, Top Left, Bottom",
"(1,1)] #short for operations count = 0 for i in range(8): #bounds checking",
"'-' else: maze[idx[0], idx[1]] = '|' for i in range(maze.shape[0]): for j in",
"+ index[1]: continue grid[y + index[0], x + index[1]] = 255 visited.add((y +",
"(1,-1), (-1,1), (1,1)] for index in ops: if y + index[0] == 0",
"1), k) maze[i * 2 + 1, j * 2 + 1] =",
"ret def print_maze(grid): maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2",
"def create_snapshot(new_image, index, direction, color=None): # set marking color to 255 (white) if",
"rows, cols): continue if grid[y,x] == 255: count += 1 return count def",
"= random.sample(ops, k=len(ops)) count = 0 for index in dirs: if count ==",
"x + index[1] == 0 or grid.shape[1] - 1 == x + index[1]:",
"secondIdx = None, color = None): # mark one or two changes, algorithm",
"dirs = random.sample(ops, k=len(ops)) count = 0 for index in dirs: if count",
"1 elif dir == 'R': return 0 elif dir == 'T': return 2",
"idx, -1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) newIMG = create_snapshot(newIMG,",
"= curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y][x].visited == False:",
"= index[0] - nbr_index[0] x = index[1] - nbr_index[1] if x == 1:",
"direction, color=None): # set marking color to 255 (white) if none provided if",
"return count def checkRules(grid, index, rule): c = countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for",
"def mark_change(idx, gif_arr, wall_idx, secondIdx = None, color = None): # mark one",
"not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None, color = None):",
"def neighborCheck(grid, curr, rows, cols): #order: Left, Right, Top, Down ops = [(0,-1),",
"ops: if y + index[0] == 0 or grid.shape[0] - 1 == y",
"range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid): for i in range(len(grid)): print(\"[\", end=\"\")",
"set of functions in this file. #2. implement these functions into the generation",
"[] for i in range(4): #bounds checking x = curr.index[1] + ops[i][1] y",
"+ ops[i][1] y = curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if",
"'R': return (index[0], index[1] + 1) elif dir == 'T': return (index[0] -",
"getNeighbor(grid, curr, rows, cols, previous): #order: Left, Right, Top, Down ops = [(0,-1),",
"False return True def check_visited(y, x, visited): ops = [(0,-1), (0,1), (-1,0), (1,0),",
"- 1: return True if index[1] < 0 or index[1] > cols -",
"dir == 'R': return 0 elif dir == 'T': return 2 return 3",
"0: return new_image # find the index of the wall to break remove",
"(-1,-1), (1,-1), (-1,1), (1,1)] for index in ops: if y + index[0] ==",
"functions in this file. #2. implement these functions into the generation algorithms available.",
"for j in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid): for i in",
"if color == None: color = 255 # assign the given color to",
"== 1: return (index[0], index[1] + 1) elif dir == 2: return (index[0]",
"for i in range(maze.shape[0]): for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\" \") print() def",
"(y,x) == previous.index: continue ret.append(grid[y][x]) return ret def print_maze(grid): maze = np.chararray((len(grid) *",
"x, random, visited, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1),",
"!= 'X': ret.append(i) return ret def nbr_index(index, dir): if dir == 'L': return",
"= 0 for index in dirs: if count == len(dirs): break if not",
"grid.shape[0] - 1 == y + index[0] or x + index[1] == 0",
"+ index[0], x + index[1]), grid.shape[0], grid.shape[1]): if y + index[0] == 0",
"index[1]) def conv_nbr_wall(dir): if dir == 'L': return 1 elif dir == 'R':",
"create_snapshot(new_image, index, direction, color=None): # set marking color to 255 (white) if none",
"1) elif dir == 2: return (index[0] - 1, index[1]) return (index[0] +",
"False def start_cells(grid, y, x, random, visited, unvisited): ops = [(0,-1), (0,1), (-1,0),",
"len(grid[0]) * 2 + 1)) maze[:,:] = '@' for i in range(len(grid)): for",
"x = index[1] - nbr_index[1] if x == 1: return 'R' if x",
"numpy as np #TODO: #1. create a streamlined and replicable gif creation set",
"dir == 2: return (index[0] - 1, index[1]) return (index[0] + 1, index[1])",
"or (y,x) == previous.index: continue ret.append(grid[y][x]) return ret def print_maze(grid): maze = np.chararray((len(grid)",
"int(character): return True return False def start_cells(grid, y, x, random, visited, unvisited): ops",
"secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx =",
"operations count = 0 for i in range(8): #bounds checking x = index[1]",
"return new_image def grid_to_image(index): return (index[0] * 2 + 1, index[1] * 2",
"+ index[0], x + index[1]] = 255 visited.add((y + index[0], x + index[1]))",
"if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG =",
"bounds_check((y + index[0], x + index[1]), grid.shape[0], grid.shape[1]): if y + index[0] ==",
"(-1,0), (1,0)] #short for operations ret = [] for i in range(4): #bounds",
"count == 0: return False return True def check_visited(y, x, visited): ops =",
"generation algorithms available. def convert_2d(index, cols): return (index // cols, index % cols)",
"(1,0), (-1,-1), (1,-1), (-1,1), (1,1)] #short for operations count = 0 for i",
"* 2 + 1,j * 2 + 1), k) maze[i * 2 +",
"if x == 1: return 'R' if x == -1: return 'L' if",
"gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None, color = None): if secondIdx ==",
"1: return 'T' if y == -1: return 'D' def print_grid(grid): for i",
"range(4): idx = maze_index((i * 2 + 1,j * 2 + 1), k)",
"i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): if grid[i][j].visited == True:",
"== 0 or grid.shape[0] - 1 == y + index[0] or x +",
"True def check_visited(y, x, visited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1),",
"3 def conv_idx_dir(index, nbr_index): y = index[0] - nbr_index[0] x = index[1] -",
"return (index[0] - 1, index[1]) return (index[0] + 1, index[1]) def conv_nbr_wall(dir): if",
"= color return new_image def grid_to_image(index): return (index[0] * 2 + 1, index[1]",
"index[1] > cols - 1: return True return False def neighborCheck(grid, curr, rows,",
"idx, -1, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]):",
"ops[i][1] y = curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols) or (y,x) ==",
"range(len(grid)): for j in range(len(grid[i])): for k in range(4): idx = maze_index((i *",
"1) elif dir == 1: return (index[0], index[1] + 1) elif dir ==",
"return 'D' def print_grid(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in",
"to the cell to mark it as active new_image[index[0], index[1]] = color if",
"for k in range(4): idx = maze_index((i * 2 + 1,j * 2",
"file. #2. implement these functions into the generation algorithms available. def convert_2d(index, cols):",
"1] = '+' if grid[i][j].walls[k] == 'X': if k == 0 or k",
"maze[i * 2 + 1, j * 2 + 1] = '+' if",
"\") else: print('O', end=\", \") print(\"]\") def maze_index(index, dir): if dir == 0:",
"else: print('O', end=\", \") print(\"]\") def maze_index(index, dir): if dir == 0: return",
"for j in range(len(grid[i])): if grid[i][j].visited == True: print('X', end=\", \") else: print('O',",
"as np #TODO: #1. create a streamlined and replicable gif creation set of",
"+ 1] = '+' if grid[i][j].walls[k] == 'X': if k == 0 or",
"'L': return 1 elif dir == 'R': return 0 elif dir == 'T':",
"(1,-1), (-1,1), (1,1)] #short for operations count = 0 for i in range(8):",
"index[1] + 1) elif dir == 'T': return (index[0] - 1, index[1]) return",
"y == 1: return 'T' if y == -1: return 'D' def print_grid(grid):",
"marking color to 255 (white) if none provided if color == None: color",
"k == 0 or k == 1: maze[idx[0], idx[1]] = '-' else: maze[idx[0],",
"in ops: if y + index[0] == 0 or grid.shape[0] - 1 ==",
"index[1]] = color if direction < 0: return new_image # find the index",
"index[1]) in visited: return True return False def update_set(y, x, all_nodes, grid, unvisited):",
"grid.shape[1] - 1 == x + index[1]: continue all_nodes.add((y,x)) if (y,x) in unvisited:",
"Right ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] #short for",
"new_image # find the index of the wall to break remove mark_as_white =",
"none provided if color == None: color = 255 # assign the given",
"if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None, color =",
"index[0] or x + index[1] == 0 or grid.shape[1] - 1 == x",
"index, grid.shape[0], grid.shape[1]) for character in rule: if c == int(character): return True",
"return (index // cols, index % cols) def bounds_check(index, rows, cols): if index[0]",
"print_index(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].index, end=\",",
"idx, wall_idx, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) newIMG = create_snapshot(newIMG,",
"2 + 1, j * 2 + 1] = '+' if grid[i][j].walls[k] ==",
"False def update_set(y, x, all_nodes, grid, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0),",
"bounds_check((y,x), rows, cols): continue if grid[y][x].visited == False: if curr.walls[i] != 'X': ret.append(i)",
"+ 1, index[1] * 2 + 1) def mark_change(idx, gif_arr, wall_idx, secondIdx =",
"set marking color to 255 (white) if none provided if color == None:",
"def conv_idx_dir(index, nbr_index): y = index[0] - nbr_index[0] x = index[1] - nbr_index[1]",
"return (index[0] + 1, index[1]) def create_snapshot(new_image, index, direction, color=None): # set marking",
"index[1] - 1) elif dir == 1: return (index[0], index[1] + 1) elif",
"(-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in ops: if y +",
"== -1: return 'L' if y == 1: return 'T' if y ==",
"x + index[1])) update_set(y + index[0], x + index[1], visited, grid, unvisited) count",
"(index[0] * 2 + 1, index[1] * 2 + 1) def mark_change(idx, gif_arr,",
"def mark_node(idx, gif_arr, secondIdx = None, color = None): if secondIdx == None:",
"for i in range(8): #bounds checking x = index[1] + ops[i][1] y =",
"mark_as_white[1]] = color return new_image def grid_to_image(index): return (index[0] * 2 + 1,",
"== 'L': return (index[0], index[1] - 1) elif dir == 'R': return (index[0],",
"def bounds_check(index, rows, cols): if index[0] < 0 or index[0] > rows -",
"algorithm specific if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else:",
"rows, cols): #order: Left, Right, Top, Down ops = [(0,-1), (0,1), (-1,0), (1,0)]",
"previous.index: continue ret.append(grid[y][x]) return ret def print_maze(grid): maze = np.chararray((len(grid) * 2 +",
"k in range(4): idx = maze_index((i * 2 + 1,j * 2 +",
"curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols) or (y,x) == previous.index: continue ret.append(grid[y][x])",
"checking x = curr.index[1] + ops[i][1] y = curr.index[0] + ops[i][0] if bounds_check((y,x),",
"Left, Top Right, Bottom Right ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1),",
"grid.shape[1] - 1 == x + index[1]: continue grid[y + index[0], x +",
"elif dir == 'R': return (index[0], index[1] + 1) elif dir == 'T':",
"active new_image[index[0], index[1]] = color if direction < 0: return new_image # find",
"#order: Left, Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom Right",
"grid, unvisited) count += 1 if count == 0: return False return True",
"secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else: newIMG = create_snapshot(gif_arr[-1].copy(),",
"= create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr,",
"newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if",
"index[1]), grid.shape[0], grid.shape[1]): if y + index[0] == 0 or grid.shape[0] - 1",
"newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if",
"return (index[0] - 1, index[1]) return (index[0] + 1, index[1]) def create_snapshot(new_image, index,",
"#bounds checking x = curr.index[1] + ops[i][1] y = curr.index[0] + ops[i][0] if",
"= 255 # assign the given color to the cell to mark it",
"Right, Top, Down ops = [(0,-1), (0,1), (-1,0), (1,0)] #short for operations ret",
"(1,-1), (-1,1), (1,1)] for index in ops: if (y + index[0], x +",
"elif dir == 2: return (index[0] - 1, index[1]) return (index[0] + 1,",
"return 1 elif dir == 'R': return 0 elif dir == 'T': return",
"= [] for i in range(4): #bounds checking x = curr.index[1] + ops[i][1]",
"- 1) elif dir == 'R': return (index[0], index[1] + 1) elif dir",
"+ 1) elif dir == 'T': return (index[0] - 1, index[1]) return (index[0]",
"end=\"\") for j in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid): for i",
"\") print(\"]\") def print_index(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in",
"def print_visited(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): if",
"2 + 1)) maze[:,:] = '@' for i in range(len(grid)): for j in",
"+ index[1] == 0 or grid.shape[1] - 1 == x + index[1]: continue",
"x + index[1]) in visited: return True return False def update_set(y, x, all_nodes,",
"'X': ret.append(i) return ret def nbr_index(index, dir): if dir == 'L': return (index[0],",
"y, x, random, visited, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1),",
"count += 1 if count == 0: return False return True def check_visited(y,",
"in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def",
"# assign the given color to the cell to mark it as active",
"curr.walls[i] != 'X': ret.append(i) return ret def nbr_index(index, dir): if dir == 'L':",
"if k == 0 or k == 1: maze[idx[0], idx[1]] = '-' else:",
"-1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols, previous):",
"checking x = index[1] + ops[i][1] y = index[0] + ops[i][0] if bounds_check((y,x),",
"countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for character in rule: if c == int(character): return",
"ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in",
"[(0,-1), (0,1), (-1,0), (1,0)] #short for operations ret = [] for i in",
"else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color)",
"return 'T' if y == -1: return 'D' def print_grid(grid): for i in",
"index in dirs: if count == len(dirs): break if not bounds_check((y + index[0],",
"grid[i][j].visited == True: print('X', end=\", \") else: print('O', end=\", \") print(\"]\") def maze_index(index,",
"index[1]) return (index[0] + 1, index[1]) def conv_nbr_wall(dir): if dir == 'L': return",
"count = 0 for i in range(8): #bounds checking x = index[1] +",
"for character in rule: if c == int(character): return True return False def",
"maze_index((i * 2 + 1,j * 2 + 1), k) maze[i * 2",
"start_cells(grid, y, x, random, visited, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1),",
"Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom Right ops =",
"index[1] + ops[i][1] y = index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue",
"* 2 + 1] = '+' if grid[i][j].walls[k] == 'X': if k ==",
"random.sample(ops, k=len(ops)) count = 0 for index in dirs: if count == len(dirs):",
"// cols, index % cols) def bounds_check(index, rows, cols): if index[0] < 0",
"print(\"]\") def maze_index(index, dir): if dir == 0: return (index[0], index[1] - 1)",
"rows, cols) or (y,x) == previous.index: continue ret.append(grid[y][x]) return ret def print_maze(grid): maze",
"np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2 + 1)) maze[:,:] = '@'",
"(0,1), (-1,0), (1,0)] #short for operations ret = [] for i in range(4):",
"in range(len(grid[i])): for k in range(4): idx = maze_index((i * 2 + 1,j",
"wall (set it to the provided color) new_image[mark_as_white[0], mark_as_white[1]] = color return new_image",
"gif_arr, secondIdx = None, color = None): if secondIdx == None: newIMG =",
"ops = [(0,-1), (0,1), (-1,0), (1,0)] #short for operations ret = [] for",
"wall to break remove mark_as_white = maze_index(index, direction) # remove the wall (set",
"+ 1)) maze[:,:] = '@' for i in range(len(grid)): for j in range(len(grid[i])):",
"count += 1 return count def checkRules(grid, index, rule): c = countNeighbors(grid, index,",
"check_visited(y, x, visited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]",
"cols): #order: Left, Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom",
"j * 2 + 1] = '+' if grid[i][j].walls[k] == 'X': if k",
"range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid):",
"color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def",
"random, visited, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]",
"the wall to break remove mark_as_white = maze_index(index, direction) # remove the wall",
"bounds_check((y,x), rows, cols): continue if grid[y,x] == 255: count += 1 return count",
"mark one or two changes, algorithm specific if secondIdx == None: newIMG =",
"= maze_index(index, direction) # remove the wall (set it to the provided color)",
"= '+' if grid[i][j].walls[k] == 'X': if k == 0 or k ==",
"c = countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for character in rule: if c ==",
"- 1, index[1]) return (index[0] + 1, index[1]) def conv_nbr_wall(dir): if dir ==",
"end=\"\") for j in range(len(grid[i])): if grid[i][j].visited == True: print('X', end=\", \") else:",
"ops[i][1] y = index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y,x]",
"y = index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y,x] ==",
"grid[i][j].walls[k] == 'X': if k == 0 or k == 1: maze[idx[0], idx[1]]",
"-1: return 'L' if y == 1: return 'T' if y == -1:",
"print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid): for",
"return True def check_visited(y, x, visited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1),",
"provided if color == None: color = 255 # assign the given color",
"in rule: if c == int(character): return True return False def start_cells(grid, y,",
"ret = [] for i in range(4): #bounds checking x = curr.index[1] +",
"not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols, previous): #order: Left, Right,",
"index[1], visited, grid, unvisited) count += 1 if count == 0: return False",
"== x + index[1]: continue grid[y + index[0], x + index[1]] = 255",
"- 1, index[1]) return (index[0] + 1, index[1]) def create_snapshot(new_image, index, direction, color=None):",
"dir == 'L': return 1 elif dir == 'R': return 0 elif dir",
"* 2 + 1, j * 2 + 1] = '+' if grid[i][j].walls[k]",
"(1,0), (-1,-1), (1,-1), (-1,1), (1,1)] dirs = random.sample(ops, k=len(ops)) count = 0 for",
"\") print(\"]\") def print_visited(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in",
"255 visited.add((y + index[0], x + index[1])) update_set(y + index[0], x + index[1],",
"algorithms available. def convert_2d(index, cols): return (index // cols, index % cols) def",
"bounds_check(index, rows, cols): if index[0] < 0 or index[0] > rows - 1:",
"> cols - 1: return True return False def neighborCheck(grid, curr, rows, cols):",
"== int(character): return True return False def start_cells(grid, y, x, random, visited, unvisited):",
"2 + 1), k) maze[i * 2 + 1, j * 2 +",
"the index of the wall to break remove mark_as_white = maze_index(index, direction) #",
"newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)",
"maze[idx[0], idx[1]] = '|' for i in range(maze.shape[0]): for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'),",
"= index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y,x] == 255:",
"= '|' for i in range(maze.shape[0]): for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\" \")",
"return True return False def start_cells(grid, y, x, random, visited, unvisited): ops =",
"dirs: if count == len(dirs): break if not bounds_check((y + index[0], x +",
"return new_image # find the index of the wall to break remove mark_as_white",
"neighborCheck(grid, curr, rows, cols): #order: Left, Right, Top, Down ops = [(0,-1), (0,1),",
"idx[1]] = '|' for i in range(maze.shape[0]): for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\"",
"(-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] #short for operations count = 0 for",
"(index[0] - 1, index[1]) return (index[0] + 1, index[1]) def create_snapshot(new_image, index, direction,",
"or two changes, algorithm specific if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx,",
"== -1: return 'D' def print_grid(grid): for i in range(len(grid)): print(\"[\", end=\"\") for",
"dir): if dir == 0: return (index[0], index[1] - 1) elif dir ==",
"y = curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y][x].visited ==",
"+ 1, index[1]) def create_snapshot(new_image, index, direction, color=None): # set marking color to",
"return 'R' if x == -1: return 'L' if y == 1: return",
"cols, index % cols) def bounds_check(index, rows, cols): if index[0] < 0 or",
"[(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] dirs = random.sample(ops, k=len(ops)) count",
"return True return False def neighborCheck(grid, curr, rows, cols): #order: Left, Right, Top,",
"ops: if (y + index[0], x + index[1]) in visited: return True return",
"continue grid[y + index[0], x + index[1]] = 255 visited.add((y + index[0], x",
"None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx,",
"(index[0] + 1, index[1]) def conv_nbr_wall(dir): if dir == 'L': return 1 elif",
"* 2 + 1) def mark_change(idx, gif_arr, wall_idx, secondIdx = None, color =",
"previous): #order: Left, Right, Top, Down ops = [(0,-1), (0,1), (-1,0), (1,0)] #short",
"color = None): # mark one or two changes, algorithm specific if secondIdx",
"(0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in ops: if (y",
"index[1]) return (index[0] + 1, index[1]) def create_snapshot(new_image, index, direction, color=None): # set",
"1) elif dir == 'T': return (index[0] - 1, index[1]) return (index[0] +",
"'L': return (index[0], index[1] - 1) elif dir == 'R': return (index[0], index[1]",
"== 0 or grid.shape[1] - 1 == x + index[1]: continue all_nodes.add((y,x)) if",
"index[1] - nbr_index[1] if x == 1: return 'R' if x == -1:",
"ret def nbr_index(index, dir): if dir == 'L': return (index[0], index[1] - 1)",
"if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else: newIMG =",
"(index[0] + 1, index[1]) def create_snapshot(new_image, index, direction, color=None): # set marking color",
"convert_2d(index, cols): return (index // cols, index % cols) def bounds_check(index, rows, cols):",
"(y + index[0], x + index[1]) in visited: return True return False def",
"True return False def update_set(y, x, all_nodes, grid, unvisited): ops = [(0,-1), (0,1),",
"rows, cols, previous): #order: Left, Right, Top, Down ops = [(0,-1), (0,1), (-1,0),",
"operations ret = [] for i in range(4): #bounds checking x = curr.index[1]",
"ret.append(grid[y][x]) return ret def print_maze(grid): maze = np.chararray((len(grid) * 2 + 1, len(grid[0])",
"range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].index, end=\", \") print(\"]\") def print_visited(grid):",
"color=None): # set marking color to 255 (white) if none provided if color",
"two changes, algorithm specific if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx,",
"in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\" \") print() def countNeighbors(grid, index, rows, cols): #order: Left,",
"of the wall to break remove mark_as_white = maze_index(index, direction) # remove the",
"break remove mark_as_white = maze_index(index, direction) # remove the wall (set it to",
"'@' for i in range(len(grid)): for j in range(len(grid[i])): for k in range(4):",
"return (index[0] + 1, index[1]) def conv_nbr_wall(dir): if dir == 'L': return 1",
"Right, Bottom Right ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]",
"index, rule): c = countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for character in rule: if",
"None): # mark one or two changes, algorithm specific if secondIdx == None:",
"Left, Right, Top, Down, Top Left, Bottom Left, Top Right, Bottom Right ops",
"# set marking color to 255 (white) if none provided if color ==",
"True return False def start_cells(grid, y, x, random, visited, unvisited): ops = [(0,-1),",
"2 + 1,j * 2 + 1), k) maze[i * 2 + 1,",
"break if not bounds_check((y + index[0], x + index[1]), grid.shape[0], grid.shape[1]): if y",
"find the index of the wall to break remove mark_as_white = maze_index(index, direction)",
"return True if index[1] < 0 or index[1] > cols - 1: return",
"(1,-1), (-1,1), (1,1)] dirs = random.sample(ops, k=len(ops)) count = 0 for index in",
"y + index[0] or x + index[1] == 0 or grid.shape[1] - 1",
"return (index[0] * 2 + 1, index[1] * 2 + 1) def mark_change(idx,",
"index[1])) update_set(y + index[0], x + index[1], visited, grid, unvisited) count += 1",
"return False return True def check_visited(y, x, visited): ops = [(0,-1), (0,1), (-1,0),",
"None, color = None): # mark one or two changes, algorithm specific if",
"gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols, previous): #order: Left, Right, Top, Down ops",
"print_maze(grid): maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2 + 1))",
"if y == -1: return 'D' def print_grid(grid): for i in range(len(grid)): print(\"[\",",
"visited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index",
"= [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in ops:",
"(-1,1), (1,1)] for index in ops: if y + index[0] == 0 or",
"countNeighbors(grid, index, rows, cols): #order: Left, Right, Top, Down, Top Left, Bottom Left,",
"= '@' for i in range(len(grid)): for j in range(len(grid[i])): for k in",
"= create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr,",
"- 1 == y + index[0] or x + index[1] == 0 or",
"== 1: return 'R' if x == -1: return 'L' if y ==",
"+ index[1], visited, grid, unvisited) count += 1 if count == 0: return",
"to 255 (white) if none provided if color == None: color = 255",
"== 'T': return 2 return 3 def conv_idx_dir(index, nbr_index): y = index[0] -",
"np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def mark_node(idx, gif_arr, secondIdx = None, color = None): if",
"None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1,",
"print() def countNeighbors(grid, index, rows, cols): #order: Left, Right, Top, Down, Top Left,",
"not bounds_check((y + index[0], x + index[1]), grid.shape[0], grid.shape[1]): if y + index[0]",
"== None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx,",
"Left, Bottom Left, Top Right, Bottom Right ops = [(0,-1), (0,1), (-1,0), (1,0),",
"or grid.shape[0] - 1 == y + index[0] or x + index[1] ==",
"(-1,1), (1,1)] for index in ops: if (y + index[0], x + index[1])",
"k == 1: maze[idx[0], idx[1]] = '-' else: maze[idx[0], idx[1]] = '|' for",
"'X': if k == 0 or k == 1: maze[idx[0], idx[1]] = '-'",
"if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols, previous): #order: Left,",
"color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols, previous): #order:",
"continue if grid[y][x].visited == False: if curr.walls[i] != 'X': ret.append(i) return ret def",
"if dir == 0: return (index[0], index[1] - 1) elif dir == 1:",
"(1,1)] for index in ops: if y + index[0] == 0 or grid.shape[0]",
"return 0 elif dir == 'T': return 2 return 3 def conv_idx_dir(index, nbr_index):",
"for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\" \") print() def countNeighbors(grid, index, rows, cols):",
"[(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] #short for operations count =",
"rule: if c == int(character): return True return False def start_cells(grid, y, x,",
"+ 1, len(grid[0]) * 2 + 1)) maze[:,:] = '@' for i in",
"print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid): for i in range(len(grid)): print(\"[\", end=\"\") for",
"mark it as active new_image[index[0], index[1]] = color if direction < 0: return",
"def grid_to_image(index): return (index[0] * 2 + 1, index[1] * 2 + 1)",
"maze = np.chararray((len(grid) * 2 + 1, len(grid[0]) * 2 + 1)) maze[:,:]",
"#TODO: #1. create a streamlined and replicable gif creation set of functions in",
"+ 1, j * 2 + 1] = '+' if grid[i][j].walls[k] == 'X':",
"create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows,",
"import numpy as np #TODO: #1. create a streamlined and replicable gif creation",
"'D' def print_grid(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])):",
"(1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in ops: if (y + index[0],",
"return 2 return 3 def conv_idx_dir(index, nbr_index): y = index[0] - nbr_index[0] x",
"> rows - 1: return True if index[1] < 0 or index[1] >",
"in ops: if (y + index[0], x + index[1]) in visited: return True",
"dir == 'T': return (index[0] - 1, index[1]) return (index[0] + 1, index[1])",
"(index[0], index[1] - 1) elif dir == 1: return (index[0], index[1] + 1)",
"new_image[mark_as_white[0], mark_as_white[1]] = color return new_image def grid_to_image(index): return (index[0] * 2 +",
"for j in range(len(grid[i])): print(grid[i][j].index, end=\", \") print(\"]\") def print_visited(grid): for i in",
"color to 255 (white) if none provided if color == None: color =",
"if direction < 0: return new_image # find the index of the wall",
"False def neighborCheck(grid, curr, rows, cols): #order: Left, Right, Top, Down ops =",
"index[0], x + index[1], visited, grid, unvisited) count += 1 if count ==",
"grid_to_image(index): return (index[0] * 2 + 1, index[1] * 2 + 1) def",
"== 0: return False return True def check_visited(y, x, visited): ops = [(0,-1),",
"gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols, previous): #order: Left, Right, Top, Down",
"unvisited) count += 1 if count == 0: return False return True def",
"= create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) newIMG",
"elif dir == 'T': return (index[0] - 1, index[1]) return (index[0] + 1,",
"1)) maze[:,:] = '@' for i in range(len(grid)): for j in range(len(grid[i])): for",
"= None): if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) else:",
"return 3 def conv_idx_dir(index, nbr_index): y = index[0] - nbr_index[0] x = index[1]",
"if grid[y][x].visited == False: if curr.walls[i] != 'X': ret.append(i) return ret def nbr_index(index,",
"end=\", \") print(\"]\") def print_index(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j",
"None, color = None): if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1,",
"range(8): #bounds checking x = index[1] + ops[i][1] y = index[0] + ops[i][0]",
"= index[1] - nbr_index[1] if x == 1: return 'R' if x ==",
"x, all_nodes, grid, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1),",
"print('O', end=\", \") print(\"]\") def maze_index(index, dir): if dir == 0: return (index[0],",
"1: return True return False def neighborCheck(grid, curr, rows, cols): #order: Left, Right,",
"== False: if curr.walls[i] != 'X': ret.append(i) return ret def nbr_index(index, dir): if",
"all_nodes, grid, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)]",
"one or two changes, algorithm specific if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(),",
"for index in dirs: if count == len(dirs): break if not bounds_check((y +",
"print_visited(grid): for i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): if grid[i][j].visited",
"in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\") def print_index(grid): for i in range(len(grid)): print(\"[\",",
"cols): continue if grid[y,x] == 255: count += 1 return count def checkRules(grid,",
"secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(),",
"curr, rows, cols, previous): #order: Left, Right, Top, Down ops = [(0,-1), (0,1),",
"== 0 or grid.shape[1] - 1 == x + index[1]: continue grid[y +",
"if index[1] < 0 or index[1] > cols - 1: return True return",
"remove the wall (set it to the provided color) new_image[mark_as_white[0], mark_as_white[1]] = color",
"in dirs: if count == len(dirs): break if not bounds_check((y + index[0], x",
"curr, rows, cols): #order: Left, Right, Top, Down ops = [(0,-1), (0,1), (-1,0),",
"def checkRules(grid, index, rule): c = countNeighbors(grid, index, grid.shape[0], grid.shape[1]) for character in",
"1: return 'R' if x == -1: return 'L' if y == 1:",
"return (index[0], index[1] - 1) elif dir == 'R': return (index[0], index[1] +",
"# mark one or two changes, algorithm specific if secondIdx == None: newIMG",
"- nbr_index[1] if x == 1: return 'R' if x == -1: return",
"== 'T': return (index[0] - 1, index[1]) return (index[0] + 1, index[1]) def",
"i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].walls, end=\", \") print(\"]\")",
"nbr_index(index, dir): if dir == 'L': return (index[0], index[1] - 1) elif dir",
"0 or grid.shape[0] - 1 == y + index[0] or x + index[1]",
"None: color = 255 # assign the given color to the cell to",
"+ index[1])) update_set(y + index[0], x + index[1], visited, grid, unvisited) count +=",
"- nbr_index[0] x = index[1] - nbr_index[1] if x == 1: return 'R'",
"create_snapshot(gif_arr[-1].copy(), idx, -1, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG,",
"(1,0), (-1,-1), (1,-1), (-1,1), (1,1)] for index in ops: if y + index[0]",
"def start_cells(grid, y, x, random, visited, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0),",
"== 'R': return 0 elif dir == 'T': return 2 return 3 def",
"1 == x + index[1]: continue grid[y + index[0], x + index[1]] =",
"< 0 or index[0] > rows - 1: return True if index[1] <",
"in range(maze.shape[0]): for j in range(maze.shape[1]): print(maze[i,j].decode('utf-8'), end=\" \") print() def countNeighbors(grid, index,",
"(index[0], index[1] + 1) elif dir == 2: return (index[0] - 1, index[1])",
"remove mark_as_white = maze_index(index, direction) # remove the wall (set it to the",
"(index // cols, index % cols) def bounds_check(index, rows, cols): if index[0] <",
"rows, cols): continue if grid[y][x].visited == False: if curr.walls[i] != 'X': ret.append(i) return",
"newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color)",
"2: return (index[0] - 1, index[1]) return (index[0] + 1, index[1]) def create_snapshot(new_image,",
"index in ops: if (y + index[0], x + index[1]) in visited: return",
"0: return False return True def check_visited(y, x, visited): ops = [(0,-1), (0,1),",
"index % cols) def bounds_check(index, rows, cols): if index[0] < 0 or index[0]",
"cols): if index[0] < 0 or index[0] > rows - 1: return True",
"(0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] #short for operations count = 0",
"implement these functions into the generation algorithms available. def convert_2d(index, cols): return (index",
"None): if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, -1, color) else: newIMG",
"i in range(len(grid)): print(\"[\", end=\"\") for j in range(len(grid[i])): print(grid[i][j].index, end=\", \") print(\"]\")",
"+ 1) def mark_change(idx, gif_arr, wall_idx, secondIdx = None, color = None): #",
"return True return False def update_set(y, x, all_nodes, grid, unvisited): ops = [(0,-1),",
"index[0], x + index[1]) in visited: return True return False def update_set(y, x,",
"'+' if grid[i][j].walls[k] == 'X': if k == 0 or k == 1:",
"Top, Down, Top Left, Bottom Left, Top Right, Bottom Right ops = [(0,-1),",
"ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] #short for operations",
"def conv_nbr_wall(dir): if dir == 'L': return 1 elif dir == 'R': return",
"= [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] dirs = random.sample(ops, k=len(ops))",
"\") print() def countNeighbors(grid, index, rows, cols): #order: Left, Right, Top, Down, Top",
"index[1]: continue grid[y + index[0], x + index[1]] = 255 visited.add((y + index[0],",
"% cols) def bounds_check(index, rows, cols): if index[0] < 0 or index[0] >",
"nbr_index[0] x = index[1] - nbr_index[1] if x == 1: return 'R' if",
"visited, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1), (-1,1), (1,1)] dirs",
"+ index[1]) in visited: return True return False def update_set(y, x, all_nodes, grid,",
"== 'R': return (index[0], index[1] + 1) elif dir == 'T': return (index[0]",
"1, j * 2 + 1] = '+' if grid[i][j].walls[k] == 'X': if",
"c == int(character): return True return False def start_cells(grid, y, x, random, visited,",
"+ index[0], x + index[1]) in visited: return True return False def update_set(y,",
"0 or k == 1: maze[idx[0], idx[1]] = '-' else: maze[idx[0], idx[1]] =",
"streamlined and replicable gif creation set of functions in this file. #2. implement",
"= None, color = None): if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx,",
"+ index[1]), grid.shape[0], grid.shape[1]): if y + index[0] == 0 or grid.shape[0] -",
"+ ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y,x] == 255: count +=",
"curr.index[0] + ops[i][0] if bounds_check((y,x), rows, cols): continue if grid[y][x].visited == False: if",
"changes, algorithm specific if secondIdx == None: newIMG = create_snapshot(gif_arr[-1].copy(), idx, wall_idx, color)",
"def update_set(y, x, all_nodes, grid, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1),",
"range(4): #bounds checking x = curr.index[1] + ops[i][1] y = curr.index[0] + ops[i][0]",
"cols) def bounds_check(index, rows, cols): if index[0] < 0 or index[0] > rows",
"update_set(y, x, all_nodes, grid, unvisited): ops = [(0,-1), (0,1), (-1,0), (1,0), (-1,-1), (1,-1),",
"of functions in this file. #2. implement these functions into the generation algorithms",
"for operations ret = [] for i in range(4): #bounds checking x =",
"index, rows, cols): #order: Left, Right, Top, Down, Top Left, Bottom Left, Top",
"visited: return True return False def update_set(y, x, all_nodes, grid, unvisited): ops =",
"index[0] < 0 or index[0] > rows - 1: return True if index[1]",
"if grid[i][j].visited == True: print('X', end=\", \") else: print('O', end=\", \") print(\"]\") def",
"0 or grid.shape[1] - 1 == x + index[1]: continue grid[y + index[0],",
"'R' if x == -1: return 'L' if y == 1: return 'T'",
"+ index[0] or x + index[1] == 0 or grid.shape[1] - 1 ==",
"direction < 0: return new_image # find the index of the wall to",
"cell to mark it as active new_image[index[0], index[1]] = color if direction <",
"'T' if y == -1: return 'D' def print_grid(grid): for i in range(len(grid)):",
"end=\" \") print() def countNeighbors(grid, index, rows, cols): #order: Left, Right, Top, Down,",
"rows, cols): #order: Left, Right, Top, Down, Top Left, Bottom Left, Top Right,",
"color = 255 # assign the given color to the cell to mark",
"#order: Left, Right, Top, Down ops = [(0,-1), (0,1), (-1,0), (1,0)] #short for",
"index[1] + 1) elif dir == 2: return (index[0] - 1, index[1]) return",
"#1. create a streamlined and replicable gif creation set of functions in this",
"the cell to mark it as active new_image[index[0], index[1]] = color if direction",
"# remove the wall (set it to the provided color) new_image[mark_as_white[0], mark_as_white[1]] =",
"idx, wall_idx, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]):",
"index[1] < 0 or index[1] > cols - 1: return True return False",
"j in range(len(grid[i])): if grid[i][j].visited == True: print('X', end=\", \") else: print('O', end=\",",
"wall_idx, color) newIMG = create_snapshot(newIMG, secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG)",
"secondIdx, -1, color) if not np.array_equal(newIMG, gif_arr[-1]): gif_arr.append(newIMG) def getNeighbor(grid, curr, rows, cols,",
"character in rule: if c == int(character): return True return False def start_cells(grid,",
"if count == len(dirs): break if not bounds_check((y + index[0], x + index[1]),",
"0 for i in range(8): #bounds checking x = index[1] + ops[i][1] y",
"or grid.shape[1] - 1 == x + index[1]: continue all_nodes.add((y,x)) if (y,x) in",
"0: return (index[0], index[1] - 1) elif dir == 1: return (index[0], index[1]"
"analyzed\").pack() btn = tk.Button(l, text = \"import\", width = 10, command=select).pack() l.mainloop() #TODO:",
"AND CLASSIFY # # OR DO SECTIONS LIKE 10x10 GRIDS # # #",
"# WITH ITS GRAYSCALE AND CLASSIFY # # OR DO SECTIONS LIKE 10x10",
"to define exact points - have user select roi again - use x,",
"KNeighborsClassifier from tkinter import filedialog from PIL import ImageTk, Image l = tk.Tk()",
"img = c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image = icon).pack()",
"\"Please select an image to by analyzed\").pack() btn = tk.Button(l, text = \"import\",",
"old algorithm to define exact points - have user select roi again -",
"10x10 GRIDS # # # # # # # # # # #",
"# AND GRAYSCALE AS CLASSIFICATION # # VALUES, AND THEN RECEIVE A POINT",
"= filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img =",
"# # USE KNN FOR A THREE DIMENSIONAL # # PLANE IN WHICH",
"width = 10, command=select).pack() l.mainloop() #TODO: ALL THIS HERE # # # #",
"def select(): global name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes =",
"import filedialog from PIL import ImageTk, Image l = tk.Tk() l.geometry('300x300') def select():",
"text = \"import\", width = 10, command=select).pack() l.mainloop() #TODO: ALL THIS HERE #",
"again - use x, y, and grayscale as three point in knn classification",
"- have user select roi again - use x, y, and grayscale as",
"[5*3, 2*1, 9*5, 9*9] values_g = [1, 0, 5, 10] image l.title('MRI Analysis",
"from tkinter import filedialog from PIL import ImageTk, Image l = tk.Tk() l.geometry('300x300')",
"= icon).pack() analyze(name) def analyze(image): values_xy = [5*3, 2*1, 9*5, 9*9] values_g =",
"# # AND GRAYSCALE AS CLASSIFICATION # # VALUES, AND THEN RECEIVE A",
"5, 10] image l.title('MRI Analysis tool') lbl = tk.Label(l, text = \"Please select",
"# # PLANE IN WHICH YOU USE DISTANCE # # AND GRAYSCALE AS",
"files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image",
"2*1, 9*5, 9*9] values_g = [1, 0, 5, 10] image l.title('MRI Analysis tool')",
"Analysis tool') lbl = tk.Label(l, text = \"Please select an image to by",
"CLASSIFICATION # # VALUES, AND THEN RECEIVE A POINT # # WITH ITS",
"# # # ''' Notes: - maybe use old algorithm to define exact",
"# # # # ''' Notes: - maybe use old algorithm to define",
"# # ''' Notes: - maybe use old algorithm to define exact points",
"user select roi again - use x, y, and grayscale as three point",
"text = \"Please select an image to by analyzed\").pack() btn = tk.Button(l, text",
"DISTANCE # # AND GRAYSCALE AS CLASSIFICATION # # VALUES, AND THEN RECEIVE",
"an image to by analyzed\").pack() btn = tk.Button(l, text = \"import\", width =",
"have user select roi again - use x, y, and grayscale as three",
"select an image to by analyzed\").pack() btn = tk.Button(l, text = \"import\", width",
"numpy as np import tkinter as tk from sklearn.neighbors import KNeighborsClassifier from tkinter",
"tk from sklearn.neighbors import KNeighborsClassifier from tkinter import filedialog from PIL import ImageTk,",
"# # # # # # # # # # # # '''",
"to by analyzed\").pack() btn = tk.Button(l, text = \"import\", width = 10, command=select).pack()",
"files\",\"*.*\"))) img = c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image =",
"= \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0)",
"ImageTk, Image l = tk.Tk() l.geometry('300x300') def select(): global name name = filedialog.askopenfilename(initialdir",
"10] image l.title('MRI Analysis tool') lbl = tk.Label(l, text = \"Please select an",
"# # # # # ''' Notes: - maybe use old algorithm to",
"9*5, 9*9] values_g = [1, 0, 5, 10] image l.title('MRI Analysis tool') lbl",
"[1, 0, 5, 10] image l.title('MRI Analysis tool') lbl = tk.Label(l, text =",
"file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label",
"#TODO: ALL THIS HERE # # # # # # # # #",
"# # # USE KNN FOR A THREE DIMENSIONAL # # PLANE IN",
"# # # # # # # # # USE KNN FOR A",
"= tk.Tk() l.geometry('300x300') def select(): global name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title =",
"import tkinter as tk from sklearn.neighbors import KNeighborsClassifier from tkinter import filedialog from",
"# # # # # # ''' Notes: - maybe use old algorithm",
"ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image = icon).pack() analyze(name) def analyze(image): values_xy = [5*3,",
"# ''' Notes: - maybe use old algorithm to define exact points -",
"# # # # # # # # ''' Notes: - maybe use",
"= c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image = icon).pack() analyze(name)",
"= tk.Label(l, image = icon).pack() analyze(name) def analyze(image): values_xy = [5*3, 2*1, 9*5,",
"Image l = tk.Tk() l.geometry('300x300') def select(): global name name = filedialog.askopenfilename(initialdir =",
"GRAYSCALE AS CLASSIFICATION # # VALUES, AND THEN RECEIVE A POINT # #",
"PIL import ImageTk, Image l = tk.Tk() l.geometry('300x300') def select(): global name name",
"name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img",
"# # # # # # # USE KNN FOR A THREE DIMENSIONAL",
"l.title('MRI Analysis tool') lbl = tk.Label(l, text = \"Please select an image to",
"from sklearn.neighbors import KNeighborsClassifier from tkinter import filedialog from PIL import ImageTk, Image",
"= \"Please select an image to by analyzed\").pack() btn = tk.Button(l, text =",
"lbl = tk.Label(l, text = \"Please select an image to by analyzed\").pack() btn",
"image to by analyzed\").pack() btn = tk.Button(l, text = \"import\", width = 10,",
"ALL THIS HERE # # # # # # # # # #",
"# # # # # # # # # # # USE KNN",
"# # # # # # USE KNN FOR A THREE DIMENSIONAL #",
"# # VALUES, AND THEN RECEIVE A POINT # # WITH ITS GRAYSCALE",
"# # # # # # # # # # ''' Notes: -",
"Notes: - maybe use old algorithm to define exact points - have user",
"- maybe use old algorithm to define exact points - have user select",
"icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image = icon).pack() analyze(name) def analyze(image): values_xy",
"A THREE DIMENSIONAL # # PLANE IN WHICH YOU USE DISTANCE # #",
"0, 5, 10] image l.title('MRI Analysis tool') lbl = tk.Label(l, text = \"Please",
"= 10, command=select).pack() l.mainloop() #TODO: ALL THIS HERE # # # # #",
"A POINT # # WITH ITS GRAYSCALE AND CLASSIFY # # OR DO",
"''' Notes: - maybe use old algorithm to define exact points - have",
"# # # # # # # # # # USE KNN FOR",
"# VALUES, AND THEN RECEIVE A POINT # # WITH ITS GRAYSCALE AND",
"ITS GRAYSCALE AND CLASSIFY # # OR DO SECTIONS LIKE 10x10 GRIDS #",
"filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name,",
"WITH ITS GRAYSCALE AND CLASSIFY # # OR DO SECTIONS LIKE 10x10 GRIDS",
"<reponame>Hsuirad/Hemorrhagic-Volume-Assessment import cv2 as c import numpy as np import tkinter as tk",
"# # WITH ITS GRAYSCALE AND CLASSIFY # # OR DO SECTIONS LIKE",
"GRAYSCALE AND CLASSIFY # # OR DO SECTIONS LIKE 10x10 GRIDS # #",
"# # OR DO SECTIONS LIKE 10x10 GRIDS # # # # #",
"tk.Button(l, text = \"import\", width = 10, command=select).pack() l.mainloop() #TODO: ALL THIS HERE",
"# # # # # # # # # # # # USE",
"as c import numpy as np import tkinter as tk from sklearn.neighbors import",
"= [1, 0, 5, 10] image l.title('MRI Analysis tool') lbl = tk.Label(l, text",
"use old algorithm to define exact points - have user select roi again",
"global name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all",
"- use x, y, and grayscale as three point in knn classification '''",
"select roi again - use x, y, and grayscale as three point in",
"THIS HERE # # # # # # # # # # #",
"VALUES, AND THEN RECEIVE A POINT # # WITH ITS GRAYSCALE AND CLASSIFY",
"command=select).pack() l.mainloop() #TODO: ALL THIS HERE # # # # # # #",
"select(): global name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg",
"tool') lbl = tk.Label(l, text = \"Please select an image to by analyzed\").pack()",
"10, command=select).pack() l.mainloop() #TODO: ALL THIS HERE # # # # # #",
"# # # # # # # # # ''' Notes: - maybe",
"btn = tk.Button(l, text = \"import\", width = 10, command=select).pack() l.mainloop() #TODO: ALL",
"exact points - have user select roi again - use x, y, and",
"= ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label =",
"CLASSIFY # # OR DO SECTIONS LIKE 10x10 GRIDS # # # #",
"l = tk.Tk() l.geometry('300x300') def select(): global name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title",
"maybe use old algorithm to define exact points - have user select roi",
"algorithm to define exact points - have user select roi again - use",
"= ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image = icon).pack() analyze(name) def analyze(image): values_xy =",
"# USE KNN FOR A THREE DIMENSIONAL # # PLANE IN WHICH YOU",
"l.geometry('300x300') def select(): global name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes",
"= [5*3, 2*1, 9*5, 9*9] values_g = [1, 0, 5, 10] image l.title('MRI",
"YOU USE DISTANCE # # AND GRAYSCALE AS CLASSIFICATION # # VALUES, AND",
"OR DO SECTIONS LIKE 10x10 GRIDS # # # # # # #",
"\"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name))",
"cv2 as c import numpy as np import tkinter as tk from sklearn.neighbors",
"icon).pack() analyze(name) def analyze(image): values_xy = [5*3, 2*1, 9*5, 9*9] values_g = [1,",
"from PIL import ImageTk, Image l = tk.Tk() l.geometry('300x300') def select(): global name",
"analyze(image): values_xy = [5*3, 2*1, 9*5, 9*9] values_g = [1, 0, 5, 10]",
"9*9] values_g = [1, 0, 5, 10] image l.title('MRI Analysis tool') lbl =",
"values_xy = [5*3, 2*1, 9*5, 9*9] values_g = [1, 0, 5, 10] image",
"sklearn.neighbors import KNeighborsClassifier from tkinter import filedialog from PIL import ImageTk, Image l",
"as tk from sklearn.neighbors import KNeighborsClassifier from tkinter import filedialog from PIL import",
"values_g = [1, 0, 5, 10] image l.title('MRI Analysis tool') lbl = tk.Label(l,",
"PLANE IN WHICH YOU USE DISTANCE # # AND GRAYSCALE AS CLASSIFICATION #",
"def analyze(image): values_xy = [5*3, 2*1, 9*5, 9*9] values_g = [1, 0, 5,",
"l.mainloop() #TODO: ALL THIS HERE # # # # # # # #",
"= tk.Button(l, text = \"import\", width = 10, command=select).pack() l.mainloop() #TODO: ALL THIS",
"# OR DO SECTIONS LIKE 10x10 GRIDS # # # # # #",
"POINT # # WITH ITS GRAYSCALE AND CLASSIFY # # OR DO SECTIONS",
"np import tkinter as tk from sklearn.neighbors import KNeighborsClassifier from tkinter import filedialog",
"filedialog from PIL import ImageTk, Image l = tk.Tk() l.geometry('300x300') def select(): global",
"DIMENSIONAL # # PLANE IN WHICH YOU USE DISTANCE # # AND GRAYSCALE",
"label = tk.Label(l, image = icon).pack() analyze(name) def analyze(image): values_xy = [5*3, 2*1,",
"FOR A THREE DIMENSIONAL # # PLANE IN WHICH YOU USE DISTANCE #",
"GRIDS # # # # # # # # # # # #",
"roi again - use x, y, and grayscale as three point in knn",
"image = icon).pack() analyze(name) def analyze(image): values_xy = [5*3, 2*1, 9*5, 9*9] values_g",
"AND THEN RECEIVE A POINT # # WITH ITS GRAYSCALE AND CLASSIFY #",
"# # # # # # # # # # # # #",
"analyze(name) def analyze(image): values_xy = [5*3, 2*1, 9*5, 9*9] values_g = [1, 0,",
"tk.Label(l, text = \"Please select an image to by analyzed\").pack() btn = tk.Button(l,",
"import numpy as np import tkinter as tk from sklearn.neighbors import KNeighborsClassifier from",
"# # # # # # # # USE KNN FOR A THREE",
"import KNeighborsClassifier from tkinter import filedialog from PIL import ImageTk, Image l =",
"tkinter import filedialog from PIL import ImageTk, Image l = tk.Tk() l.geometry('300x300') def",
"AS CLASSIFICATION # # VALUES, AND THEN RECEIVE A POINT # # WITH",
"WHICH YOU USE DISTANCE # # AND GRAYSCALE AS CLASSIFICATION # # VALUES,",
"SECTIONS LIKE 10x10 GRIDS # # # # # # # # #",
"as np import tkinter as tk from sklearn.neighbors import KNeighborsClassifier from tkinter import",
"tk.Label(l, image = icon).pack() analyze(name) def analyze(image): values_xy = [5*3, 2*1, 9*5, 9*9]",
"\"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0) icon",
"HERE # # # # # # # # # # # #",
"points - have user select roi again - use x, y, and grayscale",
"define exact points - have user select roi again - use x, y,",
"THREE DIMENSIONAL # # PLANE IN WHICH YOU USE DISTANCE # # AND",
"= tk.Label(l, text = \"Please select an image to by analyzed\").pack() btn =",
"LIKE 10x10 GRIDS # # # # # # # # # #",
"# # # # USE KNN FOR A THREE DIMENSIONAL # # PLANE",
"# # # # # USE KNN FOR A THREE DIMENSIONAL # #",
"IN WHICH YOU USE DISTANCE # # AND GRAYSCALE AS CLASSIFICATION # #",
"tkinter as tk from sklearn.neighbors import KNeighborsClassifier from tkinter import filedialog from PIL",
"= \"import\", width = 10, command=select).pack() l.mainloop() #TODO: ALL THIS HERE # #",
"c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image = icon).pack() analyze(name) def",
"USE DISTANCE # # AND GRAYSCALE AS CLASSIFICATION # # VALUES, AND THEN",
"# # # # # # # ''' Notes: - maybe use old",
"= \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0) icon =",
"0) icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l, image = icon).pack() analyze(name) def analyze(image):",
"import cv2 as c import numpy as np import tkinter as tk from",
"AND GRAYSCALE AS CLASSIFICATION # # VALUES, AND THEN RECEIVE A POINT #",
"c import numpy as np import tkinter as tk from sklearn.neighbors import KNeighborsClassifier",
"name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select file\",filetypes = ((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\")))",
"by analyzed\").pack() btn = tk.Button(l, text = \"import\", width = 10, command=select).pack() l.mainloop()",
"USE KNN FOR A THREE DIMENSIONAL # # PLANE IN WHICH YOU USE",
"RECEIVE A POINT # # WITH ITS GRAYSCALE AND CLASSIFY # # OR",
"# # # # # # # # # # # ''' Notes:",
"import ImageTk, Image l = tk.Tk() l.geometry('300x300') def select(): global name name =",
"image l.title('MRI Analysis tool') lbl = tk.Label(l, text = \"Please select an image",
"KNN FOR A THREE DIMENSIONAL # # PLANE IN WHICH YOU USE DISTANCE",
"# PLANE IN WHICH YOU USE DISTANCE # # AND GRAYSCALE AS CLASSIFICATION",
"THEN RECEIVE A POINT # # WITH ITS GRAYSCALE AND CLASSIFY # #",
"tk.Tk() l.geometry('300x300') def select(): global name name = filedialog.askopenfilename(initialdir = \"/Users/Dariush/Desktop/python_code/mri_machine_learning\",title = \"Select",
"DO SECTIONS LIKE 10x10 GRIDS # # # # # # # #",
"((\"jpeg files\",\"*.jpg\"),(\"all files\",\"*.*\"))) img = c.imread(name, 0) icon = ImageTk.PhotoImage(Image.open(name)) label = tk.Label(l,",
"\"import\", width = 10, command=select).pack() l.mainloop() #TODO: ALL THIS HERE # # #"
"print(c, end=' ') print('Acabou') #pg que faz uma contagem de 2 em 2",
"contagem de pares.py for c in range(2, 51, 2): print(c, end=' ') print('Acabou')",
"de pares.py for c in range(2, 51, 2): print(c, end=' ') print('Acabou') #pg",
"para baixar/netcha/exr47 contagem de pares.py for c in range(2, 51, 2): print(c, end='",
"baixar/netcha/exr47 contagem de pares.py for c in range(2, 51, 2): print(c, end=' ')",
"') print('Acabou') #pg que faz uma contagem de 2 em 2 ebtre 1",
"<reponame>vany-oss/python<filename>pasta para baixar/netcha/exr47 contagem de pares.py for c in range(2, 51, 2): print(c,",
"#pg que faz uma contagem de 2 em 2 ebtre 1 a 50",
"2): print(c, end=' ') print('Acabou') #pg que faz uma contagem de 2 em",
"range(2, 51, 2): print(c, end=' ') print('Acabou') #pg que faz uma contagem de",
"end=' ') print('Acabou') #pg que faz uma contagem de 2 em 2 ebtre",
"print('Acabou') #pg que faz uma contagem de 2 em 2 ebtre 1 a",
"pares.py for c in range(2, 51, 2): print(c, end=' ') print('Acabou') #pg que",
"for c in range(2, 51, 2): print(c, end=' ') print('Acabou') #pg que faz",
"51, 2): print(c, end=' ') print('Acabou') #pg que faz uma contagem de 2",
"c in range(2, 51, 2): print(c, end=' ') print('Acabou') #pg que faz uma",
"in range(2, 51, 2): print(c, end=' ') print('Acabou') #pg que faz uma contagem"
"= options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType):",
"return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\" %",
"to persistence format.\" % repr(item)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that",
"values. \"\"\" name = constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator()",
"PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS",
"value in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted domain",
"type constants. @param value: Python object. @type value: C{object} @return: Property type constant.",
"not fit defined domain class '%s'.\" % (self.name, str(persistedValue))) else: setattr(instance, name, value)",
"that the transformation for every list item is performed. \"\"\" if not persistedValue",
"class '%s'! Got '%s' instead!\" \\ % (fullDottedClassName, cls.__name__)) return cls def _handleImportError(self,",
"[bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str, unicode]} def",
"the property type. @type propertyTypeName: C{unicode} @param restrictions: Map of restriction parameters and",
"transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(item)) return result",
"value is None: if self._cls != value.__class__: raise ValueError(\"The value '%s' has not",
"constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details",
"= \"\" # Here you find the concrete class identifier after initialization def",
"or not. @type notNull: C{bool} \"\"\" self.restrictions = dict() self.notNull = notNull def",
"return persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms the value to the persistence layer",
"LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF",
"persistence format.\" % repr(item)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the",
"* Redistributions of source code must retain the above copyright # notice, this",
"% self.name) else: for instance, name, descriptor, value in instance.walk(): try: value =",
"# silent pylint return value class StringType(BasePropertyType): \"\"\" Represents string values. \"\"\" name",
"@type notNull: C{bool} \"\"\" self.restrictions = dict() self.notNull = notNull def validate(self, value):",
"if not value is None: result = None transformationSucceeded = False for subType",
"the concrete class identifier after initialization def __init__(self, cls=None, notNull=False): \"\"\" Constructor. @param",
"and the following disclaimer. # # * Redistributions in binary form must reproduce",
"conditions and the following disclaimer in the # documentation and/or other materials provided",
"constant matches the full dotted class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property",
"FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT",
"= None transformationSucceeded = False for subType in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue)",
"str(self._cls))) result = dict() try: for _, name, descriptor, subValue in value.walk(): result[name]",
"\"\"\" typeDisplayName = None valueType = type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if",
"a nice representation of the dictionary representation = domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self)",
"binary forms, with or without # #modification, are permitted provided that the following",
"typeDisplayName = None valueType = type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType",
"the names of # its contributors may be used to endorse or promote",
"transform value '%s' to persistence format.\" % repr(item)) return result def fromPersistenceFormat(self, persistedValue):",
"if not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(persistedValue))",
"object. @type cls: C{unicode} or class object \"\"\" BasePropertyType.__init__(self, notNull) if cls is",
"raise ValueError(\"The value '%s' has not the required type '%s'.\" \\ % (str(value),",
"restrictions=dict()): \"\"\" Factory method for property type creation. @param propertyTypeName: Name of the",
"BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes",
"the German Aerospace Center nor the names of # its contributors may be",
"empty constructor.\" % self.name) else: for instance, name, descriptor, value in instance.walk(): try:",
"type. @type propertyTypeName: C{unicode} @param restrictions: Map of restriction parameters and corresponding values.",
"basestring): self.name = cls self._cls = self._importClass(cls) else: self.name = \"%s.%s\" % (cls.__module__,",
"EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides the supported",
"self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators = list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate)",
"try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded = True break except",
"list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] =",
"IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY",
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES",
"to the properties self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a object values.",
"\"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object): \"\"\" Base class for all property types.",
"_): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an unspecific property",
"domain from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import ConfigurationError",
"nor the names of # its contributors may be used to endorse or",
"the dictionary representation = domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict #",
"value found: '%s'\" % str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform the domain object",
"if not self._isValid: raise ValueError(\"The domain class could not be found. Please \"",
"True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot restore value '%s'",
"base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every supported type",
"class object. @type cls: C{unicode} or class object \"\"\" BasePropertyType.__init__(self, notNull) if cls",
"__import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls = getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError), error:",
"Represents list of primitive values. \"\"\" name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None,",
"self._isValid: raise ValueError(\"The domain class could not be found. Please \" \\ +",
"above copyright # notice, this list of conditions and the following disclaimer. #",
"self._cls != value.__class__: raise ValueError(\"The value '%s' has not the required type '%s'.\"",
"the transformation for every supported type is tried. \"\"\" if not persistedValue is",
"in the # documentation and/or other materials provided with the # distribution. #",
"magic is useful to simplify the property type creation. Other approaches would \"blow",
"class identifier after initialization def __init__(self, cls=None, notNull=False): \"\"\" Constructor. @param cls: Full",
"concrete class identifier after initialization def __init__(self, cls=None, notNull=False): \"\"\" Constructor. @param cls:",
"numeric values. \"\"\" name = constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None,",
"maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters.",
"is tried. \"\"\" if not persistedValue is None: result = None transformationSucceeded =",
"BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:]",
"value.__class__: raise ValueError(\"The value '%s' has not the required type '%s'.\" \\ %",
"or promote products derived # from this software without specific prior written permission.",
"def determinePropertyTypeConstant(value): \"\"\" Helper function to determine the property type constant of the",
"= False for subType in self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded =",
"transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(value)) return result",
"'%s' from persistence format.\" % repr(item)) return result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes),",
"except AttributeError, error: raise ValueError(\"Cannot validate property value. Reason '%s'\" % str(error.args)) except",
"for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] =",
"None valueType = type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes:",
"\"\"\" self = self # silent pylint return persistedValue def toPersistenceFormat(self, value): \"\"\"",
"self.name = \"%s.%s\" % (cls.__module__, cls.__name__) self._cls = cls @property def _isValid(self): \"\"\"",
"= value transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise",
"availableTypes: typeDisplayName = typeName break if typeDisplayName is None: typeDisplayName = \\ \"%s.%s\"",
"allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list() for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name)",
"\\ + \"correct the configuration.\") if not value is None: if self._cls !=",
"subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except ValueError: continue",
"OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN",
"L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum",
"self.name) else: for instance, name, descriptor, value in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name])",
"format.\" % repr(persistedValue)) return result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject):",
"code must retain the above copyright # notice, this list of conditions and",
"try: value.validate() except AttributeError, error: raise ValueError(\"Cannot validate property value. Reason '%s'\" %",
"of primitive values. \"\"\" name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False):",
"problems during value transformation. \"\"\" self = self # silent pylint return value",
"from copy import deepcopy from datetime import datetime from decimal import Decimal import",
"class StringType(BasePropertyType): \"\"\" Represents string values. \"\"\" name = constants.STRING_TYPE def __init__(self, minimum=None,",
"identifier after initialization def __init__(self, cls=None, notNull=False): \"\"\" Constructor. @param cls: Full dotted",
"'%s'. Reason: '%s'\" % (self.name, reason) _log.warning(message) return UnknownDomainObject def _validate(self, value): \"\"\"",
"if allowedSubtypes is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else:",
"$Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights",
"if allowedTypes is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType())",
"try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\"",
"repr(item)) return result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType):",
"None: if not isinstance(persistedValue, dict): raise ValueError(\"The persisted value '%s' is no dictionary.\"",
"to endorse or promote products derived # from this software without specific prior",
"constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>}",
"loaded. \"\"\" # Used to have a nice representation of the dictionary representation",
"instance. \"\"\" if self._cls != value.__class__: raise ValueError(\"The value '%s' has not the",
"return instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name:",
"'%s' using empty constructor.\" % self.name) else: for instance, name, descriptor, value in",
"\"\"\" name = constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull)",
"validation of the value against the defined restrictions. Calls C{_validate} to perform concrete",
"subValidators = list() for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def",
"have a nice representation of the dictionary representation = domain.DomainProperty(StringType()) def __init__(self, theDict):",
"options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters. \"\"\"",
"not persistedValue is None: result = None transformationSucceeded = False for subType in",
"written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS",
"return result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\"",
"% repr(persistedValue)) return result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\"",
"property type creation. Other approaches would \"blow up\" the code here. \"\"\" #",
"to determine the property type constant of the given value. If the no",
"# * Redistributions of source code must retain the above copyright # notice,",
"on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES]",
"Please \" \\ + \"correct the configuration.\") if not value is None: if",
"dictionary. \"\"\" if not self._isValid: raise ValueError(\"The domain class could not be found.",
"NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()):",
"distribution. # # * Neither the name of the German Aerospace Center nor",
"validation. @raise ValueError: indicates validation errors. \"\"\" if not value is None: self._validate(value)",
"[\"\"]) cls = getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if",
"not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(item)) return",
"self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators = list() for",
"import datetime from decimal import Decimal import logging from datafinder.core.configuration.properties import constants from",
"reason) _log.warning(message) return UnknownDomainObject def _validate(self, value): \"\"\" Delegates the validation to the",
"except AttributeError: raise ValueError(\"The value '%s' is no valid domain object.\" % str(value))",
"constructor.\" % self.name) else: for instance, name, descriptor, value in instance.walk(): try: value",
"INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF",
"for property type creation. @param propertyTypeName: Name of the property type. @type propertyTypeName:",
"= minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is None: self._allowedSubtypes",
"@param propertyTypeName: Name of the property type. @type propertyTypeName: C{unicode} @param restrictions: Map",
"result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain object from the given dictionary.",
"LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON",
"conditions are #met: # # * Redistributions of source code must retain the",
"BasePropertyType.__init__(self, notNull) if allowedTypes is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType())",
"permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #",
"import constants from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import",
"return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an unspecific property type.",
"ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(value)) return result def fromPersistenceFormat(self,",
"valid domain object.\" % str(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the",
"form must reproduce the above copyright # notice, this list of conditions and",
"options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a boolean values. \"\"\" name = constants.BOOLEAN_TYPE",
"error: return self._handleImportError(str(error.args)) if cls.__name__ != className: cls = self._handleImportError(\"Failed to import class",
"None transformationSucceeded = False for subType in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value)",
"goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance",
"value '%s' from persistence format.\" % repr(item)) return result def __deepcopy__(self, _): return",
"the following disclaimer. # # * Redistributions in binary form must reproduce the",
"object. @type value: C{object} @return: Property type constant. @rtype: C{string} \"\"\" typeDisplayName =",
"code here. \"\"\" # pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions)",
"procedure to handle failed domain object imports. \"\"\" message = \"Cannot import '%s'.",
"\"blow up\" the code here. \"\"\" # pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap:",
"\"\"\" Represents list of primitive values. \"\"\" name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None,",
"minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory",
"list item is performed. \"\"\" if not value is None: result = list()",
"cls: Full dotted class name (consists of package, module, and class name) or",
"self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators",
"values. @type restrictions: C{dict} keys: C{unicode}, C{object} W0142: Here the */** magic is",
"self = self # silent pylint return value class StringType(BasePropertyType): \"\"\" Represents string",
"subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value):",
"maximum, subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every list",
"list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def",
"value '%s' from persistence format.\" % repr(persistedValue)) return result def __deepcopy__(self, _): return",
"= allowedSubtypes subValidators = list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate =",
"'%s'! Got '%s' instead!\" \\ % (fullDottedClassName, cls.__name__)) return cls def _handleImportError(self, reason):",
"Aerospace Center nor the names of # its contributors may be used to",
"EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS",
"\"\"\" Ensures that the transformation for every supported type is tried. \"\"\" if",
"DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\"",
"\"\"\" Represents numeric values. \"\"\" name = constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None,",
"# $Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011,",
"to the actual instance. \"\"\" if self._cls != value.__class__: raise ValueError(\"The value '%s'",
"to allow access to the properties self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents",
"L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum",
"source code must retain the above copyright # notice, this list of conditions",
"INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT",
"ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method",
"\"\"\" Restores the value from the persistence layer format. @raise ValueError: Indicates problems",
"been correctly loaded or not. \"\"\" return self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName):",
"pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a boolean values. \"\"\" name =",
"type. \"\"\" name = constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self,",
"every list item is performed. \"\"\" if not value is None: result =",
"#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF",
"NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA,",
"{constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str, unicode]}",
"\"\"\" Delegates the validation to the actual instance. \"\"\" if self._cls != value.__class__:",
"\"\"\" if not self._isValid: raise ValueError(\"The domain class could not be found. Please",
"maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self,",
"AnyType(BasePropertyType): \"\"\" Represents an unspecific property type. \"\"\" name = constants.ANY_TYPE def __init__(self,",
"class BooleanType(BasePropertyType): \"\"\" Represents a boolean values. \"\"\" name = constants.BOOLEAN_TYPE def __init__(self,",
"typeDisplayName = typeName break if typeDisplayName is None: typeDisplayName = \\ \"%s.%s\" %",
"self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY]",
"or without # #modification, are permitted provided that the following conditions are #met:",
"value is None: result = None transformationSucceeded = False for subType in self._allowedTypes:",
"ValueError( \"Persisted domain object '%s' does not fit defined domain class '%s'.\" %",
"for concrete validation within a sub class. \"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\"",
"in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value): \"\"\"",
"\"\"\" Represents a object values. \"\"\" name = \"\" # Here you find",
"NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,",
"continue if not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\" %",
"for property type '%s' are invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap =",
"ValueError(\"Value must not be None.\") def _validate(self, value): \"\"\" Template method for concrete",
"C{unicode}, C{object} W0142: Here the */** magic is useful to simplify the property",
"notNull): \"\"\" @param notNull: Indicates if a values may be C{None} or not.",
"type allows validation of property values against defined restrictions and performs transformation of",
"dictionary. \"\"\" if not persistedValue is None: if not isinstance(persistedValue, dict): raise ValueError(\"The",
"% (fullDottedClassName, cls.__name__)) return cls def _handleImportError(self, reason): \"\"\" Common procedure to handle",
"toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every list item is performed.",
"self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an unspecific property type. \"\"\" name =",
"list of primitive values. \"\"\" name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None,",
"self._isValid: return UnknownDomainObject(persistedValue) try: instance = self._cls() except TypeError: raise ValueError(\"Cannot create domain",
"transformationSucceeded = False for subType in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result",
"(str(value), str(self._cls))) result = dict() try: for _, name, descriptor, subValue in value.walk():",
"value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded = True break except ValueError:",
"in binary form must reproduce the above copyright # notice, this list of",
"= list() subValidators = list() for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate =",
"#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF",
"ValueError: indicates validation errors. \"\"\" if not value is None: self._validate(value) else: if",
"# Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights reserved. #",
"creation. @param propertyTypeName: Name of the property type. @type propertyTypeName: C{unicode} @param restrictions:",
"datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error",
"value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value '%s' is no valid",
"_propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name:",
"name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param value: Python object.",
"value) return instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType,",
"transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot transform",
"(INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF",
"UnknownDomainObject(persistedValue) try: instance = self._cls() except TypeError: raise ValueError(\"Cannot create domain object '%s'",
"C{bool} \"\"\" self.restrictions = dict() self.notNull = notNull def validate(self, value): \"\"\" Performs",
"supported type is tried. \"\"\" if not persistedValue is None: result = None",
"ValueError(\"Invalid property value found: '%s'\" % str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform the",
"self.restrictions = dict() self.notNull = notNull def validate(self, value): \"\"\" Performs validation of",
"AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for property type",
"dict() try: for _, name, descriptor, subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except",
"values for the persistence layer. \"\"\" from copy import deepcopy from datetime import",
"def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict # Used to allow access to",
"name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>}",
"if not value is None: result = list() for item in value: transformationSucceeded",
"German Aerospace Center nor the names of # its contributors may be used",
"try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted domain object '%s' does",
"import the associated class and raises a configuration error if something goes wrong.",
"if cls.__name__ != className: cls = self._handleImportError(\"Failed to import class '%s'! Got '%s'",
"self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a boolean",
"performed. \"\"\" if not value is None: result = list() for item in",
"parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list()",
"self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators = list() for subtype in",
"FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT",
"reproduce the above copyright # notice, this list of conditions and the following",
"THE POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides the supported property types. A property",
"constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents",
"determinePropertyTypeConstant(value): \"\"\" Helper function to determine the property type constant of the given",
"self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a object values. \"\"\" name =",
"if not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(value))",
"not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(value)) return",
"value class StringType(BasePropertyType): \"\"\" Represents string values. \"\"\" name = constants.STRING_TYPE def __init__(self,",
"return UnknownDomainObject def _validate(self, value): \"\"\" Delegates the validation to the actual instance.",
"class '%s'.\" % (self.name, str(persistedValue))) else: setattr(instance, name, value) return instance _propertyNameClassMap =",
"raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(item)) return result def",
"is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param value: Python object. @type",
"'%s' is no valid domain object.\" % str(value)) return result def fromPersistenceFormat(self, persistedValue):",
"if something goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:]",
"for every supported type is tried. \"\"\" if not value is None: result",
"properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH]",
"minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details",
"the domain object from the given dictionary. \"\"\" if not persistedValue is None:",
"disclaimer. # # * Redistributions in binary form must reproduce the above copyright",
"from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import type_validators from",
"# distribution. # # * Neither the name of the German Aerospace Center",
"given value. If the no constant matches the full dotted class name is",
"dotted class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param value:",
"= domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict # Used to allow",
"globals(), dict(), [\"\"]) cls = getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError), error: return",
"_propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for property type '%s' are",
"return self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries to import the associated",
"cls = self._handleImportError(\"Failed to import class '%s'! Got '%s' instead!\" \\ % (fullDottedClassName,",
"notNull) if cls is None: cls = UnknownDomainObject if isinstance(cls, basestring): self.name =",
"Transform the domain object into a dictionary. \"\"\" if not self._isValid: raise ValueError(\"The",
"\"\"\" from copy import deepcopy from datetime import datetime from decimal import Decimal",
"of conditions and the following disclaimer in the # documentation and/or other materials",
"of the value against the defined restrictions. Calls C{_validate} to perform concrete validation.",
"creation. Other approaches would \"blow up\" the code here. \"\"\" # pylint: disable=W0142",
"Full dotted class name (consists of package, module, and class name) or a",
"that the transformation for every supported type is tried. \"\"\" if not persistedValue",
"% repr(item)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for",
"self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators = list() for subtype",
"minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options",
"If the no constant matches the full dotted class name is returned. @see:",
"type_validators from datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object):",
"validation errors. \"\"\" if not value is None: self._validate(value) else: if self.notNull: raise",
"\"\"\" self = self # silent pylint return value class StringType(BasePropertyType): \"\"\" Represents",
"*/** magic is useful to simplify the property type creation. Other approaches would",
"OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides the supported property types. A",
"TypeError: raise ValueError(\"Cannot create domain object '%s' using empty constructor.\" % self.name) else:",
"restriction parameters and corresponding values. @type restrictions: C{dict} keys: C{unicode}, C{object} W0142: Here",
"from datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object): \"\"\"",
"IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND",
"constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is",
"= False for subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break",
"for _, name, descriptor, subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise",
"\"\"\" Helper function to determine the property type constant of the given value.",
"self.name = cls self._cls = self._importClass(cls) else: self.name = \"%s.%s\" % (cls.__module__, cls.__name__)",
"return UnknownDomainObject(persistedValue) try: instance = self._cls() except TypeError: raise ValueError(\"Cannot create domain object",
"the name of the German Aerospace Center nor the names of # its",
"within a sub class. \"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value",
"list() for item in value: transformationSucceeded = False for subType in self._allowedSubtypes: try:",
"if self._cls != value.__class__: raise ValueError(\"The value '%s' has not the required type",
"German Aerospace Center (DLR) # All rights reserved. # #Redistribution and use in",
"[int, float, Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function to determine",
"typeName break if typeDisplayName is None: typeDisplayName = \\ \"%s.%s\" % (value.__class__.__module__, value.__class__.__name__)",
"message = \"Cannot import '%s'. Reason: '%s'\" % (self.name, reason) _log.warning(message) return UnknownDomainObject",
"propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE:",
"Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType())",
"constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper",
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR",
"C{dict} keys: C{unicode}, C{object} W0142: Here the */** magic is useful to simplify",
"on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN]",
"raise ValueError(\"Cannot validate property value. Reason '%s'\" % str(error.args)) except ValueError, error: raise",
"#met: # # * Redistributions of source code must retain the above copyright",
"raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(value)) return result def",
"Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR)",
"value: transformationSucceeded = False for subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded =",
"found. Please \" \\ + \"correct the configuration.\") if not value is None:",
"C{unicode} @param restrictions: Map of restriction parameters and corresponding values. @type restrictions: C{dict}",
"$Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German",
"the actual instance. \"\"\" if self._cls != value.__class__: raise ValueError(\"The value '%s' has",
"notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\" name =",
"repr(item)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every",
"value '%s' is no dictionary.\" % str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue) try:",
"type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\" name = constants.NUMBER_TYPE def __init__(self,",
"str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a object values. \"\"\" name = \"\" #",
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO",
"2003-2011, German Aerospace Center (DLR) # All rights reserved. # #Redistribution and use",
"in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that",
"str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue) try: instance = self._cls() except TypeError: raise",
"format.\" % repr(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation",
"COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY,",
"self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an unspecific property type. \"\"\" name = constants.ANY_TYPE",
"Transforms the value to the persistence layer format. @raise ValueError: Indicates problems during",
"the persistence layer. \"\"\" from copy import deepcopy from datetime import datetime from",
"SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND",
"this software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY",
"@return: Property type constant. @rtype: C{string} \"\"\" typeDisplayName = None valueType = type(value)",
"USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY",
"class NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\" name = constants.NUMBER_TYPE def __init__(self, minimum=None,",
"AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for property",
"type creation. @param propertyTypeName: Name of the property type. @type propertyTypeName: C{unicode} @param",
"in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value '%s' is no",
"tried. \"\"\" if not persistedValue is None: result = None transformationSucceeded = False",
"not the required type '%s'.\" \\ % (str(value), str(self._cls))) result = dict() try:",
"_propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\" % propertyTypeName)",
"else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int,",
"list() subValidators = list() for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators)",
"ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__ != className: cls = self._handleImportError(\"Failed to import",
"pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value from the persistence layer format.",
"@property def _isValid(self): \"\"\" Indicates whether the domain class has been correctly loaded",
"\"\" # Here you find the concrete class identifier after initialization def __init__(self,",
"L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum",
"for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self,",
"\"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType())",
"from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\" _log =",
"'%s' is no dictionary.\" % str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue) try: instance",
"type '%s'.\" \\ % (str(value), str(self._cls))) result = dict() try: for _, name,",
"and corresponding values. @type restrictions: C{dict} keys: C{unicode}, C{object} W0142: Here the */**",
"from decimal import Decimal import logging from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import",
"= minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate =",
"ConfigurationError __version__ = \"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object): \"\"\" Base class for",
"\"correct the configuration.\") if not value is None: if self._cls != value.__class__: raise",
"the above copyright # notice, this list of conditions and the following disclaimer.",
"availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName = typeName break if typeDisplayName",
"be found. Please \" \\ + \"correct the configuration.\") if not value is",
"is performed. \"\"\" if not persistedValue is None: result = list() for item",
"properties.validators.type_validators.ListType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH]",
"raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(persistedValue)) return result def",
"\"Persisted domain object '%s' does not fit defined domain class '%s'.\" % (self.name,",
"names of # its contributors may be used to endorse or promote products",
"StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES =",
"value. Reason '%s'\" % str(error.args)) except ValueError, error: raise ValueError(\"Invalid property value found:",
"restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] =",
"None: result = None transformationSucceeded = False for subType in self._allowedTypes: try: subType.validate(value)",
"indicates validation errors. \"\"\" if not value is None: self._validate(value) else: if self.notNull:",
"HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,",
"property types. \"\"\" name = \"\" def __init__(self, notNull): \"\"\" @param notNull: Indicates",
"= theDict # Used to allow access to the properties self.representation = str(theDict)",
"the value to the persistence layer format. @raise ValueError: Indicates problems during value",
"persistence format.\" % repr(persistedValue)) return result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class",
"every supported type is tried. \"\"\" if not persistedValue is None: result =",
"property type constants. @param value: Python object. @type value: C{object} @return: Property type",
"the configuration.\") if not value is None: if self._cls != value.__class__: raise ValueError(\"The",
"L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param value: Python object. @type value: C{object} @return:",
"__deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values of",
"persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms the value to the persistence layer format.",
"self._validate(value) else: if self.notNull: raise ValueError(\"Value must not be None.\") def _validate(self, value):",
"None transformationSucceeded = False for subType in self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value)",
"OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL",
"= cls self._cls = self._importClass(cls) else: self.name = \"%s.%s\" % (cls.__module__, cls.__name__) self._cls",
"transformationSucceeded = False for subType in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value)",
"= options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class",
"access to the properties self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a object",
"None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes",
"#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR",
"or class object \"\"\" BasePropertyType.__init__(self, notNull) if cls is None: cls = UnknownDomainObject",
"\"\"\" message = \"Cannot import '%s'. Reason: '%s'\" % (self.name, reason) _log.warning(message) return",
"specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS",
"\"\"\" if not persistedValue is None: result = None transformationSucceeded = False for",
"result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot",
"optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a",
"type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every",
"item is performed. \"\"\" if not value is None: result = list() for",
"def _importClass(self, fullDottedClassName): \"\"\" Tries to import the associated class and raises a",
"on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS]",
"ValueError, error: raise ValueError(\"Invalid property value found: '%s'\" % str(error.args)) def toPersistenceFormat(self, value):",
"AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT",
"optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list of",
"% (cls.__module__, cls.__name__) self._cls = cls @property def _isValid(self): \"\"\" Indicates whether the",
"ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF",
"(self.name, reason) _log.warning(message) return UnknownDomainObject def _validate(self, value): \"\"\" Delegates the validation to",
"% (str(value), str(self._cls))) result = dict() try: for _, name, descriptor, subValue in",
"minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction",
"self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the",
"transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(item)) return result",
"descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value '%s' is no valid domain object.\" %",
"the */** magic is useful to simplify the property type creation. Other approaches",
"__init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is None: self._allowedTypes",
"the # distribution. # # * Neither the name of the German Aerospace",
"DAMAGE. \"\"\" Provides the supported property types. A property type allows validation of",
"instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted domain object '%s'",
"to have a nice representation of the dictionary representation = domain.DomainProperty(StringType()) def __init__(self,",
"that the transformation for every supported type is tried. \"\"\" if not value",
"for every list item is performed. \"\"\" if not persistedValue is None: result",
"from persistence format.\" % repr(persistedValue)) return result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull)",
"persistence layer format. @raise ValueError: Indicates problems during value transformation. \"\"\" self =",
"= str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a object values. \"\"\" name = \"\"",
"None: if self._cls != value.__class__: raise ValueError(\"The value '%s' has not the required",
"concrete validation within a sub class. \"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores",
"notNull=False): \"\"\" Constructor. @param cls: Full dotted class name (consists of package, module,",
"restore value '%s' from persistence format.\" % repr(persistedValue)) return result def __deepcopy__(self, _):",
"break if typeDisplayName is None: typeDisplayName = \\ \"%s.%s\" % (value.__class__.__module__, value.__class__.__name__) return",
"and binary forms, with or without # #modification, are permitted provided that the",
"@raise ValueError: indicates validation errors. \"\"\" if not value is None: self._validate(value) else:",
"if not self._isValid: return UnknownDomainObject(persistedValue) try: instance = self._cls() except TypeError: raise ValueError(\"Cannot",
"object into a dictionary. \"\"\" if not self._isValid: raise ValueError(\"The domain class could",
"to the persistence layer format. @raise ValueError: Indicates problems during value transformation. \"\"\"",
"ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(persistedValue)) return result def __deepcopy__(self,",
"__init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict # Used to allow access to the",
"Reason '%s'\" % str(error.args)) except ValueError, error: raise ValueError(\"Invalid property value found: '%s'\"",
"Indicates problems during value transformation. \"\"\" self = self # silent pylint return",
"with or without # #modification, are permitted provided that the following conditions are",
"conditions and the following disclaimer. # # * Redistributions in binary form must",
"_handleImportError(self, reason): \"\"\" Common procedure to handle failed domain object imports. \"\"\" message",
"BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] =",
"toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every supported type is tried.",
"Redistributions in binary form must reproduce the above copyright # notice, this list",
"self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class",
"datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import ConfigurationError __version__ =",
"result = value transformationSucceeded = True break except ValueError: continue if not transformationSucceeded:",
"Represents a object values. \"\"\" name = \"\" # Here you find the",
"the given value. If the no constant matches the full dotted class name",
"fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls = getattr(moduleInstance,",
"DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS",
"ValueError(\"The domain class could not be found. Please \" \\ + \"correct the",
"None: cls = UnknownDomainObject if isinstance(cls, basestring): self.name = cls self._cls = self._importClass(cls)",
"every list item is performed. \"\"\" if not persistedValue is None: result =",
"Center nor the names of # its contributors may be used to endorse",
"= maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces,",
"cls.__name__)) return cls def _handleImportError(self, reason): \"\"\" Common procedure to handle failed domain",
"value '%s' to persistence format.\" % repr(item)) return result def fromPersistenceFormat(self, persistedValue): \"\"\"",
"value '%s' has not the required type '%s'.\" \\ % (str(value), str(self._cls))) result",
"during value transformation. \"\"\" self = self # silent pylint return value class",
"the property type constant of the given value. If the no constant matches",
"= list() if allowedSubtypes is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType())",
"COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,",
"of property values against defined restrictions and performs transformation of values for the",
"optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents",
"_typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE:",
"try: for _, name, descriptor, subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError:",
"= logging.getLogger() class BasePropertyType(object): \"\"\" Base class for all property types. \"\"\" name",
"ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF",
"self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded = True break",
"transformation. \"\"\" self = self # silent pylint return value class StringType(BasePropertyType): \"\"\"",
"def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every list item is",
"# * Redistributions in binary form must reproduce the above copyright # notice,",
"defined domain class '%s'.\" % (self.name, str(persistedValue))) else: setattr(instance, name, value) return instance",
"class name (consists of package, module, and class name) or a class object.",
"PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE",
"str(persistedValue))) else: setattr(instance, name, value) return instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType,",
"else: self._allowedSubtypes = allowedSubtypes subValidators = list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name)",
"instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType,",
"% str(error.args)) except ValueError, error: raise ValueError(\"Invalid property value found: '%s'\" % str(error.args))",
"\"\"\" Transform the domain object into a dictionary. \"\"\" if not self._isValid: raise",
"def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>}",
"found: '%s'\" % str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform the domain object into",
"Constructor. @param cls: Full dotted class name (consists of package, module, and class",
"from this software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED",
"constants from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import type_validators",
"try: instance = self._cls() except TypeError: raise ValueError(\"Cannot create domain object '%s' using",
"#modification, are permitted provided that the following conditions are #met: # # *",
"name of the German Aerospace Center nor the names of # its contributors",
"self.theDict = theDict # Used to allow access to the properties self.representation =",
"property type allows validation of property values against defined restrictions and performs transformation",
"str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform the domain object into a dictionary. \"\"\"",
"# * Neither the name of the German Aerospace Center nor the names",
"name = \"\" # Here you find the concrete class identifier after initialization",
"minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum,",
"values. \"\"\" name = constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\"",
"Calls C{_validate} to perform concrete validation. @raise ValueError: indicates validation errors. \"\"\" if",
"= dict() self.notNull = notNull def validate(self, value): \"\"\" Performs validation of the",
"parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern",
"details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum",
"not be None.\") def _validate(self, value): \"\"\" Template method for concrete validation within",
"is None: cls = UnknownDomainObject if isinstance(cls, basestring): self.name = cls self._cls =",
"[list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function",
"AttributeError, error: raise ValueError(\"Cannot validate property value. Reason '%s'\" % str(error.args)) except ValueError,",
"\"\"\" Indicates whether the domain class has been correctly loaded or not. \"\"\"",
"= dict() try: for _, name, descriptor, subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue)",
"is None: if not isinstance(persistedValue, dict): raise ValueError(\"The persisted value '%s' is no",
"PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for property type creation.",
"performed. \"\"\" if not persistedValue is None: result = list() for item in",
"= subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded = True break except ValueError: continue",
"\"\"\" if not persistedValue is None: if not isinstance(persistedValue, dict): raise ValueError(\"The persisted",
"self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType):",
"constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function to",
"defined restrictions. Calls C{_validate} to perform concrete validation. @raise ValueError: indicates validation errors.",
"Name of the property type. @type propertyTypeName: C{unicode} @param restrictions: Map of restriction",
"self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY]",
"required type '%s'.\" \\ % (str(value), str(self._cls))) result = dict() try: for _,",
"optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a boolean values. \"\"\" name = constants.BOOLEAN_TYPE def",
"try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls = getattr(moduleInstance, className) except (ImportError,",
"@type value: C{object} @return: Property type constant. @rtype: C{string} \"\"\" typeDisplayName = None",
"(fullDottedClassName, cls.__name__)) return cls def _handleImportError(self, reason): \"\"\" Common procedure to handle failed",
"using empty constructor.\" % self.name) else: for instance, name, descriptor, value in instance.walk():",
"you find the concrete class identifier after initialization def __init__(self, cls=None, notNull=False): \"\"\"",
"value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted domain object '%s' does not",
"notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\"",
"Redistributions of source code must retain the above copyright # notice, this list",
"#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR",
"__init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on",
"self._handleImportError(str(error.args)) if cls.__name__ != className: cls = self._handleImportError(\"Failed to import class '%s'! Got",
"fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value from the persistence layer format. @raise ValueError:",
"def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every list item is",
"value): \"\"\" Delegates the validation to the actual instance. \"\"\" if self._cls !=",
"boolean values. \"\"\" name = constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate =",
"the transformation for every list item is performed. \"\"\" if not value is",
"ConfigurationError(\"Restrictions for property type '%s' are invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap",
"restrictions: C{dict} keys: C{unicode}, C{object} W0142: Here the */** magic is useful to",
"maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces,",
"the above copyright # notice, this list of conditions and the following disclaimer",
"without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT",
"value is None: result = list() for item in value: transformationSucceeded = False",
"def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is None:",
"\"\"\" Common procedure to handle failed domain object imports. \"\"\" message = \"Cannot",
"\"\"\" name = \"\" # Here you find the concrete class identifier after",
"concrete validation. @raise ValueError: indicates validation errors. \"\"\" if not value is None:",
"are permitted provided that the following conditions are #met: # # * Redistributions",
"loaded or not. \"\"\" return self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries",
"not. @type notNull: C{bool} \"\"\" self.restrictions = dict() self.notNull = notNull def validate(self,",
"= list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators =",
"% propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list],",
"in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName = typeName break if typeDisplayName is",
"= subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True break except ValueError: continue if not",
"rights reserved. # #Redistribution and use in source and binary forms, with or",
"list item is performed. \"\"\" if not persistedValue is None: result = list()",
"could not be found. Please \" \\ + \"correct the configuration.\") if not",
"self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is None:",
"raise ValueError(\"Invalid property value found: '%s'\" % str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform",
"and the following disclaimer in the # documentation and/or other materials provided with",
"type is tried. \"\"\" if not value is None: result = None transformationSucceeded",
"POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides the supported property types. A property type",
"provided with the # distribution. # # * Neither the name of the",
"self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list()",
"to persistence format.\" % repr(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that",
"value.validate() except AttributeError, error: raise ValueError(\"Cannot validate property value. Reason '%s'\" % str(error.args))",
"layer format. @raise ValueError: Indicates problems during value transformation. \"\"\" self = self",
"= self._importClass(cls) else: self.name = \"%s.%s\" % (cls.__module__, cls.__name__) self._cls = cls @property",
"has not the required type '%s'.\" \\ % (str(value), str(self._cls))) result = dict()",
"raise ValueError(\"Value must not be None.\") def _validate(self, value): \"\"\" Template method for",
"up\" the code here. \"\"\" # pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap: try:",
"in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for property type '%s'",
"a configuration error if something goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className =",
"UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values of domain object types whose class could",
"_validate(self, value): \"\"\" Template method for concrete validation within a sub class. \"\"\"",
"propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for property type",
"BasePropertyType(object): \"\"\" Base class for all property types. \"\"\" name = \"\" def",
"optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self,",
"performs transformation of values for the persistence layer. \"\"\" from copy import deepcopy",
"not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(persistedValue)) return",
"subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every list item",
"TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE",
"= \"Cannot import '%s'. Reason: '%s'\" % (self.name, reason) _log.warning(message) return UnknownDomainObject def",
"property type. \"\"\" name = constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\"",
"USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.",
"# from this software without specific prior written permission. # #THIS SOFTWARE IS",
"return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float,",
"result = subType.toPersistenceFormat(value) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded:",
"# #modification, are permitted provided that the following conditions are #met: # #",
"from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import base_validators from",
"Tries to import the associated class and raises a configuration error if something",
"def __init__(self, notNull): \"\"\" @param notNull: Indicates if a values may be C{None}",
"persistedValue): \"\"\" Ensures that the transformation for every list item is performed. \"\"\"",
"return result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain object from the given",
"following disclaimer in the # documentation and/or other materials provided with the #",
"the given dictionary. \"\"\" if not persistedValue is None: if not isinstance(persistedValue, dict):",
"OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR",
"raise ValueError( \"Persisted domain object '%s' does not fit defined domain class '%s'.\"",
"= \"\" def __init__(self, notNull): \"\"\" @param notNull: Indicates if a values may",
"None: result = list() for item in value: transformationSucceeded = False for subType",
"maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date and time values.",
"C{unicode} or class object \"\"\" BasePropertyType.__init__(self, notNull) if cls is None: cls =",
"fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every list item is performed.",
"\"\"\" name = \"\" def __init__(self, notNull): \"\"\" @param notNull: Indicates if a",
"minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is None: self._allowedSubtypes =",
"'%s'.\" \\ % (str(value), str(self._cls))) result = dict() try: for _, name, descriptor,",
"optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date and time values. \"\"\" name = constants.DATETIME_TYPE",
"@type cls: C{unicode} or class object \"\"\" BasePropertyType.__init__(self, notNull) if cls is None:",
"= self._cls() except TypeError: raise ValueError(\"Cannot create domain object '%s' using empty constructor.\"",
"cls.__name__ != className: cls = self._handleImportError(\"Failed to import class '%s'! Got '%s' instead!\"",
"EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,",
"be loaded. \"\"\" # Used to have a nice representation of the dictionary",
"without # #modification, are permitted provided that the following conditions are #met: #",
"__init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction",
"self._cls() except TypeError: raise ValueError(\"Cannot create domain object '%s' using empty constructor.\" %",
"Python object. @type value: C{object} @return: Property type constant. @rtype: C{string} \"\"\" typeDisplayName",
"the properties self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a object values. \"\"\"",
"\\ % (str(value), str(self._cls))) try: value.validate() except AttributeError, error: raise ValueError(\"Cannot validate property",
"is None: result = list() for item in persistedValue: transformationSucceeded = False for",
"\"\"\" Restores the domain object from the given dictionary. \"\"\" if not persistedValue",
"item in value: transformationSucceeded = False for subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item))",
"configuration.\") if not value is None: if self._cls != value.__class__: raise ValueError(\"The value",
"TypeError: raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\" % propertyTypeName) else: return",
"\"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType())",
"with the # distribution. # # * Neither the name of the German",
"for subType in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded",
"notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces",
"= \"%s.%s\" % (cls.__module__, cls.__name__) self._cls = cls @property def _isValid(self): \"\"\" Indicates",
"\"\"\" Ensures that the transformation for every list item is performed. \"\"\" if",
"date and time values. \"\"\" name = constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None,",
"name = constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if",
"KeyError: raise ValueError( \"Persisted domain object '%s' does not fit defined domain class",
"value): \"\"\" Ensures that the transformation for every supported type is tried. \"\"\"",
"$Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR) # All",
"import domain from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import",
"if not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(item))",
"result.append(value) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot",
"#Redistribution and use in source and binary forms, with or without # #modification,",
"if self.notNull: raise ValueError(\"Value must not be None.\") def _validate(self, value): \"\"\" Template",
"type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName = typeName",
"Here you find the concrete class identifier after initialization def __init__(self, cls=None, notNull=False):",
"be used to endorse or promote products derived # from this software without",
"= type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\" name = constants.NUMBER_TYPE def",
"\"Cannot import '%s'. Reason: '%s'\" % (self.name, reason) _log.warning(message) return UnknownDomainObject def _validate(self,",
"try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True break except ValueError: continue",
"% repr(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for",
"persistedValue: transformationSucceeded = False for subType in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value)",
"value. If the no constant matches the full dotted class name is returned.",
"list() for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value):",
"of package, module, and class name) or a class object. @type cls: C{unicode}",
"\"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS]",
"CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;",
"domain object types whose class could not be loaded. \"\"\" # Used to",
"the supported property types. A property type allows validation of property values against",
"domain object '%s' does not fit defined domain class '%s'.\" % (self.name, str(persistedValue)))",
"notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull)",
"Used to have a nice representation of the dictionary representation = domain.DomainProperty(StringType()) def",
"property value found: '%s'\" % str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform the domain",
"if typeDisplayName is None: typeDisplayName = \\ \"%s.%s\" % (value.__class__.__module__, value.__class__.__name__) return typeDisplayName",
"format.\" % repr(item)) return result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull)",
"has not the required type '%s'.\" \\ % (str(value), str(self._cls))) try: value.validate() except",
"are invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime],",
"'%s' does not fit defined domain class '%s'.\" % (self.name, str(persistedValue))) else: setattr(instance,",
"C{string} \"\"\" typeDisplayName = None valueType = type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems():",
"str(error.args)) except ValueError, error: raise ValueError(\"Invalid property value found: '%s'\" % str(error.args)) def",
"constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function to determine the property type",
"result = None transformationSucceeded = False for subType in self._allowedTypes: try: value =",
"\"\"\" Used to represent values of domain object types whose class could not",
"'%s'\" % (self.name, reason) _log.warning(message) return UnknownDomainObject def _validate(self, value): \"\"\" Delegates the",
"Common procedure to handle failed domain object imports. \"\"\" message = \"Cannot import",
"products derived # from this software without specific prior written permission. # #THIS",
"function to determine the property type constant of the given value. If the",
"except KeyError: raise ValueError( \"Persisted domain object '%s' does not fit defined domain",
"name, value) return instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name:",
"not be loaded. \"\"\" # Used to have a nice representation of the",
"item is performed. \"\"\" if not persistedValue is None: result = list() for",
"\"\"\" name = constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\"",
"the full dotted class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants.",
"valueType = type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName",
"Reason: '%s'\" % (self.name, reason) _log.warning(message) return UnknownDomainObject def _validate(self, value): \"\"\" Delegates",
"if not value is None: self._validate(value) else: if self.notNull: raise ValueError(\"Value must not",
"return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every list",
"WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS",
"toPersistenceFormat(self, value): \"\"\" Transforms the value to the persistence layer format. @raise ValueError:",
"constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for",
"name, descriptor, value in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError(",
"not persistedValue is None: if not isinstance(persistedValue, dict): raise ValueError(\"The persisted value '%s'",
"AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE",
"else: self.name = \"%s.%s\" % (cls.__module__, cls.__name__) self._cls = cls @property def _isValid(self):",
"value '%s' has not the required type '%s'.\" \\ % (str(value), str(self._cls))) try:",
"the following disclaimer in the # documentation and/or other materials provided with the",
"for subType in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True",
"against defined restrictions and performs transformation of values for the persistence layer. \"\"\"",
"name (consists of package, module, and class name) or a class object. @type",
"in persistedValue: transformationSucceeded = False for subType in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item)",
"self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators =",
"self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every",
"maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters.",
"\"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance = __import__(fullDottedModuleName,",
"if cls is None: cls = UnknownDomainObject if isinstance(cls, basestring): self.name = cls",
"# #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS",
"class AnyType(BasePropertyType): \"\"\" Represents an unspecific property type. \"\"\" name = constants.ANY_TYPE def",
"C{_validate} to perform concrete validation. @raise ValueError: indicates validation errors. \"\"\" if not",
"BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\" name",
"WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN",
"self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate",
"ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE",
"transformation for every supported type is tried. \"\"\" if not value is None:",
"parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces",
"list of conditions and the following disclaimer in the # documentation and/or other",
"= type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date and",
"= maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options,",
"and performs transformation of values for the persistence layer. \"\"\" from copy import",
"\"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH]",
"StringType(BasePropertyType): \"\"\" Represents string values. \"\"\" name = constants.STRING_TYPE def __init__(self, minimum=None, maximum=None,",
"to import class '%s'! Got '%s' instead!\" \\ % (fullDottedClassName, cls.__name__)) return cls",
"= False for subType in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded",
"notNull) if allowedTypes is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType())",
"FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER",
"pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters.",
"of values for the persistence layer. \"\"\" from copy import deepcopy from datetime",
"for the persistence layer. \"\"\" from copy import deepcopy from datetime import datetime",
"self._handleImportError(\"Failed to import class '%s'! Got '%s' instead!\" \\ % (fullDottedClassName, cls.__name__)) return",
"decimal import Decimal import logging from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import domain",
"+ 1:] try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls = getattr(moduleInstance, className)",
"Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights reserved. # #Redistribution",
"self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory)",
"Represents string values. \"\"\" name = constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None, options=None,",
"other materials provided with the # distribution. # # * Neither the name",
"supported type is tried. \"\"\" if not value is None: result = None",
"(c) 2003-2011, German Aerospace Center (DLR) # All rights reserved. # #Redistribution and",
"#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR",
"transformationSucceeded = False for subType in self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded",
"Used to allow access to the properties self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\"",
"repr(persistedValue)) return result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used",
"= list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators)",
"self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\"",
"OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT",
"= type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a boolean values.",
"every supported type is tried. \"\"\" if not value is None: result =",
"of source code must retain the above copyright # notice, this list of",
"BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT,",
"maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory)",
"Got '%s' instead!\" \\ % (fullDottedClassName, cls.__name__)) return cls def _handleImportError(self, reason): \"\"\"",
"ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING",
"\"\"\" Provides the supported property types. A property type allows validation of property",
"def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every supported type is",
"domain object into a dictionary. \"\"\" if not self._isValid: raise ValueError(\"The domain class",
"= minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] =",
"str(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain object from the",
"None.\") def _validate(self, value): \"\"\" Template method for concrete validation within a sub",
"promote products derived # from this software without specific prior written permission. #",
"_): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values of domain",
"a values may be C{None} or not. @type notNull: C{bool} \"\"\" self.restrictions =",
"class DatetimeType(BasePropertyType): \"\"\" Represents date and time values. \"\"\" name = constants.DATETIME_TYPE def",
"self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list() for",
"domain class has been correctly loaded or not. \"\"\" return self._cls != UnknownDomainObject",
"return result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to",
"of the dictionary representation = domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict",
"an unspecific property type. \"\"\" name = constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\"",
"= constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration.",
"= list() for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self,",
"source and binary forms, with or without # #modification, are permitted provided that",
"# # * Redistributions of source code must retain the above copyright #",
"parameters and corresponding values. @type restrictions: C{dict} keys: C{unicode}, C{object} W0142: Here the",
"if valueType in availableTypes: typeDisplayName = typeName break if typeDisplayName is None: typeDisplayName",
"primitive values. \"\"\" name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\"",
"DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO,",
"(consists of package, module, and class name) or a class object. @type cls:",
"= base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for every supported",
"OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN",
"Represents an unspecific property type. \"\"\" name = constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False):",
"datetime from decimal import Decimal import logging from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties",
"repr(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every",
"types whose class could not be loaded. \"\"\" # Used to have a",
"property type creation. @param propertyTypeName: Name of the property type. @type propertyTypeName: C{unicode}",
"does not fit defined domain class '%s'.\" % (self.name, str(persistedValue))) else: setattr(instance, name,",
"NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName,",
"@param notNull: Indicates if a values may be C{None} or not. @type notNull:",
"not value is None: self._validate(value) else: if self.notNull: raise ValueError(\"Value must not be",
"= minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate =",
"not be found. Please \" \\ + \"correct the configuration.\") if not value",
"BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] =",
"property types. A property type allows validation of property values against defined restrictions",
"object.\" % str(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain object",
"Ensures that the transformation for every list item is performed. \"\"\" if not",
"Base class for all property types. \"\"\" name = \"\" def __init__(self, notNull):",
"def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for property type creation. @param propertyTypeName: Name",
"format. @raise ValueError: Indicates problems during value transformation. \"\"\" self = self #",
"[datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\"",
"cls is None: cls = UnknownDomainObject if isinstance(cls, basestring): self.name = cls self._cls",
"__init__(self, cls=None, notNull=False): \"\"\" Constructor. @param cls: Full dotted class name (consists of",
"from persistence format.\" % repr(item)) return result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH],",
"def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value from the persistence layer format. @raise",
"= type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName =",
"OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF",
"None: result = None transformationSucceeded = False for subType in self._allowedTypes: try: value",
"optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list of primitive values. \"\"\" name = constants.LIST_TYPE",
"\"\"\" return self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries to import the",
"constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration.",
"BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] =",
"or not. \"\"\" return self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries to",
"restrictions. Calls C{_validate} to perform concrete validation. @raise ValueError: indicates validation errors. \"\"\"",
"# documentation and/or other materials provided with the # distribution. # # *",
"configuration error if something goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\")",
"dotted class name (consists of package, module, and class name) or a class",
"for item in value: transformationSucceeded = False for subType in self._allowedSubtypes: try: subType.validate(item)",
"is performed. \"\"\" if not value is None: result = list() for item",
"dictionary.\" % str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue) try: instance = self._cls() except",
"determine the property type constant of the given value. If the no constant",
"allows validation of property values against defined restrictions and performs transformation of values",
"has been correctly loaded or not. \"\"\" return self._cls != UnknownDomainObject def _importClass(self,",
"= type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list of primitive values.",
"descriptor, value in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted",
"_validate(self, value): \"\"\" Delegates the validation to the actual instance. \"\"\" if self._cls",
"validation within a sub class. \"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the",
"getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__ != className:",
"import deepcopy from datetime import datetime from decimal import Decimal import logging from",
"'%s' instead!\" \\ % (fullDottedClassName, cls.__name__)) return cls def _handleImportError(self, reason): \"\"\" Common",
"(str(value), str(self._cls))) try: value.validate() except AttributeError, error: raise ValueError(\"Cannot validate property value. Reason",
"OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE",
"object from the given dictionary. \"\"\" if not persistedValue is None: if not",
"perform concrete validation. @raise ValueError: indicates validation errors. \"\"\" if not value is",
"self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list() for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate",
"# $Filename$ # $Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright",
"self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the",
"A property type allows validation of property values against defined restrictions and performs",
"invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE:",
"= constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>}",
"options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters. \"\"\"",
"the value against the defined restrictions. Calls C{_validate} to perform concrete validation. @raise",
"= True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot restore value",
"subType.toPersistenceFormat(value) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot",
"class object \"\"\" BasePropertyType.__init__(self, notNull) if cls is None: cls = UnknownDomainObject if",
"not value is None: result = None transformationSucceeded = False for subType in",
"fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every supported type is tried.",
"of the given value. If the no constant matches the full dotted class",
"if not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(item))",
"self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except ValueError: continue if not",
"= UnknownDomainObject if isinstance(cls, basestring): self.name = cls self._cls = self._importClass(cls) else: self.name",
"class and raises a configuration error if something goes wrong. \"\"\" fullDottedModuleName =",
"propertyTypeName: C{unicode} @param restrictions: Map of restriction parameters and corresponding values. @type restrictions:",
"tried. \"\"\" if not value is None: result = None transformationSucceeded = False",
"SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND",
"'%s' has not the required type '%s'.\" \\ % (str(value), str(self._cls))) try: value.validate()",
"'%s' to persistence format.\" % repr(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures",
"\"\"\" # pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError:",
"wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance =",
"transform value '%s' to persistence format.\" % repr(value)) return result def fromPersistenceFormat(self, persistedValue):",
"\"\"\" Represents date and time values. \"\"\" name = constants.DATETIME_TYPE def __init__(self, minimum=None,",
"materials provided with the # distribution. # # * Neither the name of",
"return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every supported",
"return value class StringType(BasePropertyType): \"\"\" Represents string values. \"\"\" name = constants.STRING_TYPE def",
"BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,",
"datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object): \"\"\" Base",
"try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded = True break except ValueError: continue if",
"GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER",
"name = constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType):",
"THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH",
"value: Python object. @type value: C{object} @return: Property type constant. @rtype: C{string} \"\"\"",
"to simplify the property type creation. Other approaches would \"blow up\" the code",
"Restores the value from the persistence layer format. @raise ValueError: Indicates problems during",
"= allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list() for subtype in self._allowedTypes: subValidators.append(subtype.validate)",
"raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(item)) return result def",
"allowedTypes is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else:",
"(cls.__module__, cls.__name__) self._cls = cls @property def _isValid(self): \"\"\" Indicates whether the domain",
"self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\" name = constants.NUMBER_TYPE",
"cls self._cls = self._importClass(cls) else: self.name = \"%s.%s\" % (cls.__module__, cls.__name__) self._cls =",
"reserved. # #Redistribution and use in source and binary forms, with or without",
"% str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform the domain object into a dictionary.",
"value transformation. \"\"\" self = self # silent pylint return persistedValue def toPersistenceFormat(self,",
"result = dict() try: for _, name, descriptor, subValue in value.walk(): result[name] =",
"# Used to have a nice representation of the dictionary representation = domain.DomainProperty(StringType())",
"def validate(self, value): \"\"\" Performs validation of the value against the defined restrictions.",
"is useful to simplify the property type creation. Other approaches would \"blow up\"",
"not the required type '%s'.\" \\ % (str(value), str(self._cls))) try: value.validate() except AttributeError,",
"def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on",
"subType.validate(value) result = value transformationSucceeded = True break except ValueError: continue if not",
"self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values of domain object types whose",
"AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT",
"THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED",
"not value is None: if self._cls != value.__class__: raise ValueError(\"The value '%s' has",
"corresponding values. @type restrictions: C{dict} keys: C{unicode}, C{object} W0142: Here the */** magic",
"of the German Aerospace Center nor the names of # its contributors may",
"self # silent pylint return persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms the value",
"options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date and time values. \"\"\" name =",
"# notice, this list of conditions and the following disclaimer. # # *",
"allowedSubtypes subValidators = list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum,",
"package, module, and class name) or a class object. @type cls: C{unicode} or",
"in self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded = True break except ValueError:",
"constant. @rtype: C{string} \"\"\" typeDisplayName = None valueType = type(value) for typeName, availableTypes",
"DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal],",
"notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is",
"this list of conditions and the following disclaimer in the # documentation and/or",
"for item in persistedValue: transformationSucceeded = False for subType in self._allowedSubtypes: try: value",
"Template method for concrete validation within a sub class. \"\"\" pass def fromPersistenceFormat(self,",
"value to the persistence layer format. @raise ValueError: Indicates problems during value transformation.",
"create domain object '%s' using empty constructor.\" % self.name) else: for instance, name,",
"of restriction parameters and corresponding values. @type restrictions: C{dict} keys: C{unicode}, C{object} W0142:",
"type constant of the given value. If the no constant matches the full",
"value): \"\"\" Template method for concrete validation within a sub class. \"\"\" pass",
"for subType in self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded = True break",
"_isValid(self): \"\"\" Indicates whether the domain class has been correctly loaded or not.",
"failed domain object imports. \"\"\" message = \"Cannot import '%s'. Reason: '%s'\" %",
"endorse or promote products derived # from this software without specific prior written",
"must not be None.\") def _validate(self, value): \"\"\" Template method for concrete validation",
"\"\"\" Factory method for property type creation. @param propertyTypeName: Name of the property",
"@param restrictions: Map of restriction parameters and corresponding values. @type restrictions: C{dict} keys:",
"class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param value: Python",
"# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED",
"except ValueError, error: raise ValueError(\"Invalid property value found: '%s'\" % str(error.args)) def toPersistenceFormat(self,",
"= options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory)",
"\"\"\" Template method for concrete validation within a sub class. \"\"\" pass def",
"properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE]",
"to represent values of domain object types whose class could not be loaded.",
"must retain the above copyright # notice, this list of conditions and the",
"else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list() for subtype in",
"transformation. \"\"\" self = self # silent pylint return persistedValue def toPersistenceFormat(self, value):",
"if not persistedValue is None: if not isinstance(persistedValue, dict): raise ValueError(\"The persisted value",
"= getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__ !=",
"domain class '%s'.\" % (self.name, str(persistedValue))) else: setattr(instance, name, value) return instance _propertyNameClassMap",
"% (str(value), str(self._cls))) try: value.validate() except AttributeError, error: raise ValueError(\"Cannot validate property value.",
"ValueError(\"Cannot create domain object '%s' using empty constructor.\" % self.name) else: for instance,",
"# its contributors may be used to endorse or promote products derived #",
"import '%s'. Reason: '%s'\" % (self.name, reason) _log.warning(message) return UnknownDomainObject def _validate(self, value):",
"Other approaches would \"blow up\" the code here. \"\"\" # pylint: disable=W0142 if",
"domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict # Used to allow access",
"import Decimal import logging from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import domain from",
"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED",
"= __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls = getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError),",
"above copyright # notice, this list of conditions and the following disclaimer in",
"of conditions and the following disclaimer. # # * Redistributions in binary form",
"used to endorse or promote products derived # from this software without specific",
"is no valid domain object.\" % str(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\"",
"persistence layer. \"\"\" from copy import deepcopy from datetime import datetime from decimal",
"_importClass(self, fullDottedClassName): \"\"\" Tries to import the associated class and raises a configuration",
"descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted domain object '%s' does not fit defined",
"a class object. @type cls: C{unicode} or class object \"\"\" BasePropertyType.__init__(self, notNull) if",
"options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\"",
"from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import ConfigurationError __version__",
"not isinstance(persistedValue, dict): raise ValueError(\"The persisted value '%s' is no dictionary.\" % str(persistedValue))",
"self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for",
"# # Copyright (c) 2003-2011, German Aerospace Center (DLR) # All rights reserved.",
"persistence format.\" % repr(item)) return result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH],",
"the transformation for every supported type is tried. \"\"\" if not value is",
"domain object '%s' using empty constructor.\" % self.name) else: for instance, name, descriptor,",
"options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class",
"= constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes",
"PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS",
"of # its contributors may be used to endorse or promote products derived",
"cls=None, notNull=False): \"\"\" Constructor. @param cls: Full dotted class name (consists of package,",
"error: raise ValueError(\"Cannot validate property value. Reason '%s'\" % str(error.args)) except ValueError, error:",
"valueType in availableTypes: typeDisplayName = typeName break if typeDisplayName is None: typeDisplayName =",
"Restores the domain object from the given dictionary. \"\"\" if not persistedValue is",
"imports. \"\"\" message = \"Cannot import '%s'. Reason: '%s'\" % (self.name, reason) _log.warning(message)",
"base_validators from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\" _log",
"\"\"\" Represents an unspecific property type. \"\"\" name = constants.ANY_TYPE def __init__(self, allowedTypes=None,",
"THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED.",
"here. \"\"\" # pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except",
"import type_validators from datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\" _log = logging.getLogger() class",
"= self # silent pylint return value class StringType(BasePropertyType): \"\"\" Represents string values.",
"value '%s' to persistence format.\" % repr(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\"",
"name = constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\"",
"UnknownDomainObject def _validate(self, value): \"\"\" Delegates the validation to the actual instance. \"\"\"",
"whose class could not be loaded. \"\"\" # Used to have a nice",
"def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values",
"self._importClass(cls) else: self.name = \"%s.%s\" % (cls.__module__, cls.__name__) self._cls = cls @property def",
"simplify the property type creation. Other approaches would \"blow up\" the code here.",
"\"\"\" name = constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False):",
"dict() self.notNull = notNull def validate(self, value): \"\"\" Performs validation of the value",
"type creation. Other approaches would \"blow up\" the code here. \"\"\" # pylint:",
"# Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace",
"self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list() for subtype in self._allowedTypes:",
"persistedValue): \"\"\" Restores the domain object from the given dictionary. \"\"\" if not",
"subType in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True break",
"parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options",
"value from the persistence layer format. @raise ValueError: Indicates problems during value transformation.",
"\"\"\" if not value is None: result = None transformationSucceeded = False for",
"useful to simplify the property type creation. Other approaches would \"blow up\" the",
"may be used to endorse or promote products derived # from this software",
"= notNull def validate(self, value): \"\"\" Performs validation of the value against the",
"@see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] =",
"className: cls = self._handleImportError(\"Failed to import class '%s'! Got '%s' instead!\" \\ %",
"in value: transformationSucceeded = False for subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded",
"type '%s' are invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool],",
"returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param value: Python object. @type value:",
"if not value is None: if self._cls != value.__class__: raise ValueError(\"The value '%s'",
"represent values of domain object types whose class could not be loaded. \"\"\"",
"OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS",
"self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate",
"deepcopy from datetime import datetime from decimal import Decimal import logging from datafinder.core.configuration.properties",
"property type constant of the given value. If the no constant matches the",
"class BasePropertyType(object): \"\"\" Base class for all property types. \"\"\" name = \"\"",
"value is None: self._validate(value) else: if self.notNull: raise ValueError(\"Value must not be None.\")",
"minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction",
"= minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] =",
"after initialization def __init__(self, cls=None, notNull=False): \"\"\" Constructor. @param cls: Full dotted class",
"reason): \"\"\" Common procedure to handle failed domain object imports. \"\"\" message =",
"class has been correctly loaded or not. \"\"\" return self._cls != UnknownDomainObject def",
"notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull)",
"!= className: cls = self._handleImportError(\"Failed to import class '%s'! Got '%s' instead!\" \\",
"persistedValue is None: result = None transformationSucceeded = False for subType in self._allowedTypes:",
"cls = UnknownDomainObject if isinstance(cls, basestring): self.name = cls self._cls = self._importClass(cls) else:",
"{StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES",
"self.notNull: raise ValueError(\"Value must not be None.\") def _validate(self, value): \"\"\" Template method",
"and/or other materials provided with the # distribution. # # * Neither the",
"OF SUCH DAMAGE. \"\"\" Provides the supported property types. A property type allows",
"unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function to determine the property type constant of",
"ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(item)) return result def __deepcopy__(self,",
"\"\"\" if not persistedValue is None: result = list() for item in persistedValue:",
"None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes =",
"!= UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries to import the associated class and",
"maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a boolean values. \"\"\" name",
"propertyTypeName: Name of the property type. @type propertyTypeName: C{unicode} @param restrictions: Map of",
"!= value.__class__: raise ValueError(\"The value '%s' has not the required type '%s'.\" \\",
"that the transformation for every list item is performed. \"\"\" if not value",
"forms, with or without # #modification, are permitted provided that the following conditions",
"restore value '%s' from persistence format.\" % repr(item)) return result def __deepcopy__(self, _):",
"'%s' to persistence format.\" % repr(item)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures",
"HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,",
"UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries to import the associated class and raises",
"Used to represent values of domain object types whose class could not be",
"OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE)",
"\"\"\" # Used to have a nice representation of the dictionary representation =",
"None: result = list() for item in persistedValue: transformationSucceeded = False for subType",
"raise ValueError(\"The domain class could not be found. Please \" \\ + \"correct",
"\"\"\" Constructor. @param cls: Full dotted class name (consists of package, module, and",
"string values. \"\"\" name = constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None,",
"except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence",
"except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence",
"LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT",
"\"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value from the persistence layer",
"_log.warning(message) return UnknownDomainObject def _validate(self, value): \"\"\" Delegates the validation to the actual",
"except TypeError: raise ValueError(\"Cannot create domain object '%s' using empty constructor.\" % self.name)",
"for every supported type is tried. \"\"\" if not persistedValue is None: result",
"Represents numeric values. \"\"\" name = constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None,",
"silent pylint return value class StringType(BasePropertyType): \"\"\" Represents string values. \"\"\" name =",
"DatetimeType(BasePropertyType): \"\"\" Represents date and time values. \"\"\" name = constants.DATETIME_TYPE def __init__(self,",
"Represents date and time values. \"\"\" name = constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None,",
"OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY,",
"ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides the supported property types.",
"cls def _handleImportError(self, reason): \"\"\" Common procedure to handle failed domain object imports.",
"\"\"\" @param notNull: Indicates if a values may be C{None} or not. @type",
"is tried. \"\"\" if not value is None: result = None transformationSucceeded =",
"not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(item)) return",
"ListType(BasePropertyType): \"\"\" Represents list of primitive values. \"\"\" name = constants.LIST_TYPE def __init__(self,",
"actual instance. \"\"\" if self._cls != value.__class__: raise ValueError(\"The value '%s' has not",
"self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation",
"copy import deepcopy from datetime import datetime from decimal import Decimal import logging",
"options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters. \"\"\"",
"property value. Reason '%s'\" % str(error.args)) except ValueError, error: raise ValueError(\"Invalid property value",
"= maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] =",
"types. \"\"\" name = \"\" def __init__(self, notNull): \"\"\" @param notNull: Indicates if",
"allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction parameters.",
"def _validate(self, value): \"\"\" Delegates the validation to the actual instance. \"\"\" if",
"def __init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for",
"# #Redistribution and use in source and binary forms, with or without #",
"persistedValue): \"\"\" Restores the value from the persistence layer format. @raise ValueError: Indicates",
"the persistence layer format. @raise ValueError: Indicates problems during value transformation. \"\"\" self",
"the property type creation. Other approaches would \"blow up\" the code here. \"\"\"",
"= optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents",
"against the defined restrictions. Calls C{_validate} to perform concrete validation. @raise ValueError: indicates",
"descriptor, subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value '%s'",
"if a values may be C{None} or not. @type notNull: C{bool} \"\"\" self.restrictions",
"\"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY]",
"return cls def _handleImportError(self, reason): \"\"\" Common procedure to handle failed domain object",
"IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides the supported property",
"IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY",
"method for concrete validation within a sub class. \"\"\" pass def fromPersistenceFormat(self, persistedValue):",
"Decimal import logging from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators",
"L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum",
"fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain object from the given dictionary. \"\"\" if",
"approaches would \"blow up\" the code here. \"\"\" # pylint: disable=W0142 if propertyTypeName",
"disable=W0142 if propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for",
"this list of conditions and the following disclaimer. # # * Redistributions in",
"Ensures that the transformation for every supported type is tried. \"\"\" if not",
"values. \"\"\" name = \"\" # Here you find the concrete class identifier",
"errors. \"\"\" if not value is None: self._validate(value) else: if self.notNull: raise ValueError(\"Value",
"restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] =",
"+ \"correct the configuration.\") if not value is None: if self._cls != value.__class__:",
"unspecific property type. \"\"\" name = constants.ANY_TYPE def __init__(self, allowedTypes=None, notNull=False): \"\"\" Constructor.",
"list() if allowedSubtypes is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType())",
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO,",
"maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date and time values. \"\"\" name",
"= _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for property type creation. @param",
"maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on",
"properties self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a object values. \"\"\" name",
"#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT OF SUBSTITUTE",
"\"\"\" Represents string values. \"\"\" name = constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None,",
"= typeName break if typeDisplayName is None: typeDisplayName = \\ \"%s.%s\" % (value.__class__.__module__,",
"* Neither the name of the German Aerospace Center nor the names of",
"self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate",
"for every list item is performed. \"\"\" if not value is None: result",
"Factory method for property type creation. @param propertyTypeName: Name of the property type.",
"BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A",
"pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions",
"class. \"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value from the persistence",
"#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE",
"could not be loaded. \"\"\" # Used to have a nice representation of",
"result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent",
"and time values. \"\"\" name = constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None,",
"would \"blow up\" the code here. \"\"\" # pylint: disable=W0142 if propertyTypeName in",
"result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every supported type",
"options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list of primitive values. \"\"\" name =",
"type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date and time",
"theDict): domain.DomainObject.__init__(self) self.theDict = theDict # Used to allow access to the properties",
"= type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation for",
"documentation and/or other materials provided with the # distribution. # # * Neither",
"= fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(),",
"'%s' has not the required type '%s'.\" \\ % (str(value), str(self._cls))) result =",
"must reproduce the above copyright # notice, this list of conditions and the",
"= descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value '%s' is no valid domain object.\"",
"domain object.\" % str(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain",
"None: self._validate(value) else: if self.notNull: raise ValueError(\"Value must not be None.\") def _validate(self,",
"self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options,",
"cls @property def _isValid(self): \"\"\" Indicates whether the domain class has been correctly",
"self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is None: self._allowedSubtypes = list()",
"THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL,",
"method for property type creation. @param propertyTypeName: Name of the property type. @type",
"ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\"",
"Indicates whether the domain class has been correctly loaded or not. \"\"\" return",
"for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName = typeName break",
"sub class. \"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value from the",
"class ListType(BasePropertyType): \"\"\" Represents list of primitive values. \"\"\" name = constants.LIST_TYPE def",
"persistedValue is None: result = list() for item in persistedValue: transformationSucceeded = False",
"copyright # notice, this list of conditions and the following disclaimer. # #",
"= constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see",
"OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY",
"fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"])",
"subType.validate(value) result.append(value) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise",
"notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory",
"fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance = __import__(fullDottedModuleName, globals(),",
"transformation for every supported type is tried. \"\"\" if not persistedValue is None:",
"def toPersistenceFormat(self, value): \"\"\" Transform the domain object into a dictionary. \"\"\" if",
"1:] try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls = getattr(moduleInstance, className) except",
"def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric",
"result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents",
"self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list()",
"import class '%s'! Got '%s' instead!\" \\ % (fullDottedClassName, cls.__name__)) return cls def",
"OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY",
"in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded = True",
"persistedValue is None: if not isinstance(persistedValue, dict): raise ValueError(\"The persisted value '%s' is",
"software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE",
"ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for",
"item in persistedValue: transformationSucceeded = False for subType in self._allowedSubtypes: try: value =",
"self._cls = cls @property def _isValid(self): \"\"\" Indicates whether the domain class has",
"= {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str,",
"W0142: Here the */** magic is useful to simplify the property type creation.",
"raise ValueError(\"Cannot create domain object '%s' using empty constructor.\" % self.name) else: for",
"C{None} or not. @type notNull: C{bool} \"\"\" self.restrictions = dict() self.notNull = notNull",
"else: for instance, name, descriptor, value in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except",
"\"\"\" BasePropertyType.__init__(self, notNull) if cls is None: cls = UnknownDomainObject if isinstance(cls, basestring):",
"is None: self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes",
"values against defined restrictions and performs transformation of values for the persistence layer.",
"AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values of domain object types",
"self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS]",
"break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from",
"# pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise",
"if propertyTypeName in _propertyNameClassMap: try: return _propertyNameClassMap[propertyTypeName](**restrictions) except TypeError: raise ConfigurationError(\"Restrictions for property",
"self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an unspecific property type. \"\"\" name",
"self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures",
"maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction",
"self.notNull = notNull def validate(self, value): \"\"\" Performs validation of the value against",
"validate(self, value): \"\"\" Performs validation of the value against the defined restrictions. Calls",
"ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE",
"transformationSucceeded = False for subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True",
"restrictions: Map of restriction parameters and corresponding values. @type restrictions: C{dict} keys: C{unicode},",
"the # documentation and/or other materials provided with the # distribution. # #",
"is None: self._validate(value) else: if self.notNull: raise ValueError(\"Value must not be None.\") def",
"for instance, name, descriptor, value in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError:",
"Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function to determine the property",
"constants. @param value: Python object. @type value: C{object} @return: Property type constant. @rtype:",
"OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY",
"try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded:",
"dictionary representation = domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict # Used",
"layer. \"\"\" from copy import deepcopy from datetime import datetime from decimal import",
"def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an",
"the required type '%s'.\" \\ % (str(value), str(self._cls))) try: value.validate() except AttributeError, error:",
"the following conditions are #met: # # * Redistributions of source code must",
"following disclaimer. # # * Redistributions in binary form must reproduce the above",
"if isinstance(cls, basestring): self.name = cls self._cls = self._importClass(cls) else: self.name = \"%s.%s\"",
"__init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\" Represents numeric values.",
"@param cls: Full dotted class name (consists of package, module, and class name)",
"raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName)",
"\" \\ + \"correct the configuration.\") if not value is None: if self._cls",
"pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern, options,",
"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN",
"self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType())",
"maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory",
"value against the defined restrictions. Calls C{_validate} to perform concrete validation. @raise ValueError:",
"def toPersistenceFormat(self, value): \"\"\" Transforms the value to the persistence layer format. @raise",
"maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum,",
"AttributeError: raise ValueError(\"The value '%s' is no valid domain object.\" % str(value)) return",
"% repr(item)) return result def __deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class",
"matches the full dotted class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type",
"# # * Neither the name of the German Aerospace Center nor the",
"\"%s.%s\" % (cls.__module__, cls.__name__) self._cls = cls @property def _isValid(self): \"\"\" Indicates whether",
"# silent pylint return persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms the value to",
"subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded:",
"datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import base_validators from datafinder.core.configuration.properties.validators",
"type is tried. \"\"\" if not persistedValue is None: result = None transformationSucceeded",
"transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot restore",
"import logging from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import",
"optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self,",
"if not isinstance(persistedValue, dict): raise ValueError(\"The persisted value '%s' is no dictionary.\" %",
"def _handleImportError(self, reason): \"\"\" Common procedure to handle failed domain object imports. \"\"\"",
"problems during value transformation. \"\"\" self = self # silent pylint return persistedValue",
"SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION)",
"LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING",
"given dictionary. \"\"\" if not persistedValue is None: if not isinstance(persistedValue, dict): raise",
"= maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate =",
"instance, name, descriptor, value in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise",
"allowedSubtypes is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes",
"BasePropertyType.__init__(self, notNull) if cls is None: cls = UnknownDomainObject if isinstance(cls, basestring): self.name",
"object values. \"\"\" name = \"\" # Here you find the concrete class",
"ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to persistence format.\"",
"\"\"\" Tries to import the associated class and raises a configuration error if",
"notice, this list of conditions and the following disclaimer in the # documentation",
"subType in self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded = True break except",
"% str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue) try: instance = self._cls() except TypeError:",
"# Here you find the concrete class identifier after initialization def __init__(self, cls=None,",
"BooleanType(BasePropertyType): \"\"\" Represents a boolean values. \"\"\" name = constants.BOOLEAN_TYPE def __init__(self, notNull=False):",
"pylint return value class StringType(BasePropertyType): \"\"\" Represents string values. \"\"\" name = constants.STRING_TYPE",
"__init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for",
"NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS",
"result = None transformationSucceeded = False for subType in self._allowedTypes: try: subType.validate(value) result",
"representation of the dictionary representation = domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict =",
"Delegates the validation to the actual instance. \"\"\" if self._cls != value.__class__: raise",
"DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory",
"$Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center (DLR) #",
"% (self.name, str(persistedValue))) else: setattr(instance, name, value) return instance _propertyNameClassMap = {StringType.name: StringType,",
"NumberType(BasePropertyType): \"\"\" Represents numeric values. \"\"\" name = constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None,",
"__version__ = \"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object): \"\"\" Base class for all",
"transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\" % repr(persistedValue)) return result",
"class for all property types. \"\"\" name = \"\" def __init__(self, notNull): \"\"\"",
"EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF",
"if not persistedValue is None: result = list() for item in persistedValue: transformationSucceeded",
"initialization def __init__(self, cls=None, notNull=False): \"\"\" Constructor. @param cls: Full dotted class name",
"subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that the transformation",
"= cls @property def _isValid(self): \"\"\" Indicates whether the domain class has been",
"logging.getLogger() class BasePropertyType(object): \"\"\" Base class for all property types. \"\"\" name =",
"notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull)",
"str(self._cls))) try: value.validate() except AttributeError, error: raise ValueError(\"Cannot validate property value. Reason '%s'\"",
"result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every list item",
"name = constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration.",
"\"\"\" name = constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class",
"import ConfigurationError __version__ = \"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object): \"\"\" Base class",
"object '%s' does not fit defined domain class '%s'.\" % (self.name, str(persistedValue))) else:",
"nice representation of the dictionary representation = domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict",
"self._allowedSubtypes = allowedSubtypes subValidators = list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate",
"def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation for every supported type is",
"values may be C{None} or not. @type notNull: C{bool} \"\"\" self.restrictions = dict()",
"subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise",
"not self._isValid: return UnknownDomainObject(persistedValue) try: instance = self._cls() except TypeError: raise ValueError(\"Cannot create",
"module, and class name) or a class object. @type cls: C{unicode} or class",
"'%s'.\" % (self.name, str(persistedValue))) else: setattr(instance, name, value) return instance _propertyNameClassMap = {StringType.name:",
"\"\"\" Represents a boolean values. \"\"\" name = constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self,",
"'%s' are invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE: [bool], constants.DATETIME_TYPE:",
"value: C{object} @return: Property type constant. @rtype: C{string} \"\"\" typeDisplayName = None valueType",
"class DomainObjectType(BasePropertyType): \"\"\" Represents a object values. \"\"\" name = \"\" # Here",
"else: if self.notNull: raise ValueError(\"Value must not be None.\") def _validate(self, value): \"\"\"",
"raises a configuration error if something goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className",
"BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,",
"= None transformationSucceeded = False for subType in self._allowedTypes: try: subType.validate(value) result =",
"time values. \"\"\" name = constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False):",
"= descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted domain object '%s' does not fit",
"* Redistributions in binary form must reproduce the above copyright # notice, this",
"PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR",
"moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls = getattr(moduleInstance, className) except (ImportError, AttributeError,",
"property type. @type propertyTypeName: C{unicode} @param restrictions: Map of restriction parameters and corresponding",
"SUCH DAMAGE. \"\"\" Provides the supported property types. A property type allows validation",
"object types whose class could not be loaded. \"\"\" # Used to have",
"whether the domain class has been correctly loaded or not. \"\"\" return self._cls",
"a boolean values. \"\"\" name = constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate",
"in instance.walk(): try: value = descriptor.type.fromPersistenceFormat(persistedValue[name]) except KeyError: raise ValueError( \"Persisted domain object",
"= None valueType = type(value) for typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in",
"persistence format.\" % repr(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the",
"# notice, this list of conditions and the following disclaimer in the #",
"[str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function to determine the property type constant",
"Represents a boolean values. \"\"\" name = constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull)",
"values of domain object types whose class could not be loaded. \"\"\" #",
"and raises a configuration error if something goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")]",
"self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list of primitive",
"except (ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__ != className: cls =",
"is None: result = list() for item in value: transformationSucceeded = False for",
"_typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName = typeName break if typeDisplayName is None:",
"dict): raise ValueError(\"The persisted value '%s' is no dictionary.\" % str(persistedValue)) if not",
"return AnyType(deepcopy(self._allowedTypes), self.notNull) class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values of domain object",
"__deepcopy__(self, _): return ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an unspecific",
"(DLR) # All rights reserved. # #Redistribution and use in source and binary",
"no valid domain object.\" % str(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores",
"provided that the following conditions are #met: # # * Redistributions of source",
"list of conditions and the following disclaimer. # # * Redistributions in binary",
"validation to the actual instance. \"\"\" if self._cls != value.__class__: raise ValueError(\"The value",
"createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for property type creation. @param propertyTypeName: Name of",
"C{object} @return: Property type constant. @rtype: C{string} \"\"\" typeDisplayName = None valueType =",
"DomainObjectType(BasePropertyType): \"\"\" Represents a object values. \"\"\" name = \"\" # Here you",
"'%s'\" % str(error.args)) def toPersistenceFormat(self, value): \"\"\" Transform the domain object into a",
"@type restrictions: C{dict} keys: C{unicode}, C{object} W0142: Here the */** magic is useful",
"derived # from this software without specific prior written permission. # #THIS SOFTWARE",
"minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum,",
"a dictionary. \"\"\" if not self._isValid: raise ValueError(\"The domain class could not be",
"domain class could not be found. Please \" \\ + \"correct the configuration.\")",
"__init__(self, notNull): \"\"\" @param notNull: Indicates if a values may be C{None} or",
"= optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\"",
"setattr(instance, name, value) return instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType,",
"domain object imports. \"\"\" message = \"Cannot import '%s'. Reason: '%s'\" % (self.name,",
"\"\" def __init__(self, notNull): \"\"\" @param notNull: Indicates if a values may be",
"be None.\") def _validate(self, value): \"\"\" Template method for concrete validation within a",
"and use in source and binary forms, with or without # #modification, are",
"ValueError(\"Cannot transform value '%s' to persistence format.\" % repr(item)) return result def fromPersistenceFormat(self,",
"\"\"\" if not value is None: result = list() for item in value:",
"Map of restriction parameters and corresponding values. @type restrictions: C{dict} keys: C{unicode}, C{object}",
"= subType.toPersistenceFormat(value) transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise",
"Neither the name of the German Aerospace Center nor the names of #",
"are #met: # # * Redistributions of source code must retain the above",
"IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED",
"THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides",
"not persistedValue is None: result = list() for item in persistedValue: transformationSucceeded =",
"permitted provided that the following conditions are #met: # # * Redistributions of",
"logging from datafinder.core.configuration.properties import constants from datafinder.core.configuration.properties import domain from datafinder.core.configuration.properties.validators import base_validators",
"values. \"\"\" name = constants.NUMBER_TYPE def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None, maxDecimalPlaces=None, options=None, optionsMandatory=None,",
"value transformationSucceeded = True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot",
"self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum,",
"restrictions and performs transformation of values for the persistence layer. \"\"\" from copy",
"defined restrictions and performs transformation of values for the persistence layer. \"\"\" from",
"\"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE]",
"not. \"\"\" return self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries to import",
"@see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] =",
"subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures",
"associated class and raises a configuration error if something goes wrong. \"\"\" fullDottedModuleName",
"Aerospace Center (DLR) # All rights reserved. # #Redistribution and use in source",
"for subtype in self._allowedTypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self.validate = base_validators.OrValidator(subValidators) def toPersistenceFormat(self, value): \"\"\"",
"subValidators = list() for subtype in self._allowedSubtypes: subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum,",
"be C{None} or not. @type notNull: C{bool} \"\"\" self.restrictions = dict() self.notNull =",
"False for subType in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded =",
"datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\" _log = logging.getLogger()",
"Here the */** magic is useful to simplify the property type creation. Other",
"binary form must reproduce the above copyright # notice, this list of conditions",
"result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value '%s' is no valid domain",
"\"\"\" name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration.",
"isinstance(persistedValue, dict): raise ValueError(\"The persisted value '%s' is no dictionary.\" % str(persistedValue)) if",
"MERCHANTABILITY AND FITNESS FOR #A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL",
"a object values. \"\"\" name = \"\" # Here you find the concrete",
"% (self.name, reason) _log.warning(message) return UnknownDomainObject def _validate(self, value): \"\"\" Delegates the validation",
"property type '%s' are invalid.\" % propertyTypeName) else: return DomainObjectType(propertyTypeName) _typeConstantsPythonTypeMap = {constants.BOOLEAN_TYPE:",
"restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] =",
"False for subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except",
"#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR",
"types. A property type allows validation of property values against defined restrictions and",
"self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum,",
"the associated class and raises a configuration error if something goes wrong. \"\"\"",
"notice, this list of conditions and the following disclaimer. # # * Redistributions",
"\"\"\" self.restrictions = dict() self.notNull = notNull def validate(self, value): \"\"\" Performs validation",
"ValueError(\"Cannot validate property value. Reason '%s'\" % str(error.args)) except ValueError, error: raise ValueError(\"Invalid",
"options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType):",
"OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE,",
"constant of the given value. If the no constant matches the full dotted",
"(self.name, str(persistedValue))) else: setattr(instance, name, value) return instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name:",
"\"\"\" if self._cls != value.__class__: raise ValueError(\"The value '%s' has not the required",
"= constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for",
"values. \"\"\" name = constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False):",
"minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on",
"INCLUDING, BUT NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR",
"transformation for every list item is performed. \"\"\" if not persistedValue is None:",
"dict(), [\"\"]) cls = getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args))",
"raise ValueError(\"The persisted value '%s' is no dictionary.\" % str(persistedValue)) if not self._isValid:",
"\"\"\" name = constants.DATETIME_TYPE def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see",
"Indicates if a values may be C{None} or not. @type notNull: C{bool} \"\"\"",
"subType in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded =",
"handle failed domain object imports. \"\"\" message = \"Cannot import '%s'. Reason: '%s'\"",
"values. \"\"\" name = constants.LIST_TYPE def __init__(self, allowedSubtypes=None, minimum=None, maximum=None, notNull=False): \"\"\" @see",
"\\ % (str(value), str(self._cls))) result = dict() try: for _, name, descriptor, subValue",
"the domain object into a dictionary. \"\"\" if not self._isValid: raise ValueError(\"The domain",
"supported property types. A property type allows validation of property values against defined",
"\"\"\" Transforms the value to the persistence layer format. @raise ValueError: Indicates problems",
"or a class object. @type cls: C{unicode} or class object \"\"\" BasePropertyType.__init__(self, notNull)",
"list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators = list()",
"= list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES]",
"contributors may be used to endorse or promote products derived # from this",
"subValidators.append(subtype.validate) self.restrictions[constants.ALLOWED_SUB_TYPES].append(subtype.name) self._validate = type_validators.ListValidator(minimum, maximum, subValidators) def toPersistenceFormat(self, value): \"\"\" Ensures that",
"Helper function to determine the property type constant of the given value. If",
"notNull: Indicates if a values may be C{None} or not. @type notNull: C{bool}",
"@see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] =",
"fullDottedClassName): \"\"\" Tries to import the associated class and raises a configuration error",
"ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED",
"@type propertyTypeName: C{unicode} @param restrictions: Map of restriction parameters and corresponding values. @type",
"type constant. @rtype: C{string} \"\"\" typeDisplayName = None valueType = type(value) for typeName,",
"type_validators.StringValidator(minimum, maximum, pattern, options, optionsMandatory) class BooleanType(BasePropertyType): \"\"\" Represents a boolean values. \"\"\"",
"full dotted class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param",
"from datetime import datetime from decimal import Decimal import logging from datafinder.core.configuration.properties import",
"class could not be loaded. \"\"\" # Used to have a nice representation",
"Performs validation of the value against the defined restrictions. Calls C{_validate} to perform",
"name = \"\" def __init__(self, notNull): \"\"\" @param notNull: Indicates if a values",
"object \"\"\" BasePropertyType.__init__(self, notNull) if cls is None: cls = UnknownDomainObject if isinstance(cls,",
"BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR",
"ValueError(\"The persisted value '%s' is no dictionary.\" % str(persistedValue)) if not self._isValid: return",
"float, Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value): \"\"\" Helper function to determine the",
"is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes =",
"object imports. \"\"\" message = \"Cannot import '%s'. Reason: '%s'\" % (self.name, reason)",
"'%s' from persistence format.\" % repr(persistedValue)) return result def __deepcopy__(self, _): return AnyType(deepcopy(self._allowedTypes),",
"CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY",
"the validation to the actual instance. \"\"\" if self._cls != value.__class__: raise ValueError(\"The",
"format.\" % repr(item)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Ensures that the transformation",
"CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES",
"= {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType}",
"value): \"\"\" Transforms the value to the persistence layer format. @raise ValueError: Indicates",
"pylint return persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms the value to the persistence",
"value): \"\"\" Performs validation of the value against the defined restrictions. Calls C{_validate}",
"isinstance(cls, basestring): self.name = cls self._cls = self._importClass(cls) else: self.name = \"%s.%s\" %",
"_, name, descriptor, subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The",
"className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls",
"transformation for every list item is performed. \"\"\" if not value is None:",
"a sub class. \"\"\" pass def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the value from",
"representation = domain.DomainProperty(StringType()) def __init__(self, theDict): domain.DomainObject.__init__(self) self.theDict = theDict # Used to",
"optionsMandatory=None, notNull=False): \"\"\" @see L{NumberType.__init__<datafinder.core.configuration. properties.validators.type_validators.NumberType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self,",
"allowedTypes=None, notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is None: self._allowedTypes =",
"the no constant matches the full dotted class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>}",
"in source and binary forms, with or without # #modification, are permitted provided",
"TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS;",
"persistedValue): \"\"\" Ensures that the transformation for every supported type is tried. \"\"\"",
"transformation of values for the persistence layer. \"\"\" from copy import deepcopy from",
"not value is None: result = list() for item in value: transformationSucceeded =",
"cls = getattr(moduleInstance, className) except (ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__",
"False for subType in self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded = True",
"from the given dictionary. \"\"\" if not persistedValue is None: if not isinstance(persistedValue,",
"PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER",
"result = list() for item in persistedValue: transformationSucceeded = False for subType in",
"BooleanType, NumberType.name: NumberType, DatetimeType.name: DatetimeType, ListType.name: ListType, AnyType.name: AnyType} PROPERTY_TYPE_NAMES = _propertyNameClassMap.keys()[:] def",
"NOT #LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #A PARTICULAR",
"the value from the persistence layer format. @raise ValueError: Indicates problems during value",
"instance = self._cls() except TypeError: raise ValueError(\"Cannot create domain object '%s' using empty",
"for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] =",
"result = list() for item in value: transformationSucceeded = False for subType in",
"that the following conditions are #met: # # * Redistributions of source code",
"'%s'.\" \\ % (str(value), str(self._cls))) try: value.validate() except AttributeError, error: raise ValueError(\"Cannot validate",
"\\ % (fullDottedClassName, cls.__name__)) return cls def _handleImportError(self, reason): \"\"\" Common procedure to",
"from the persistence layer format. @raise ValueError: Indicates problems during value transformation. \"\"\"",
"def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details",
"and class name) or a class object. @type cls: C{unicode} or class object",
"= self # silent pylint return persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms the",
"in availableTypes: typeDisplayName = typeName break if typeDisplayName is None: typeDisplayName = \\",
"the domain class has been correctly loaded or not. \"\"\" return self._cls !=",
"def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain object from the given dictionary. \"\"\"",
"'%s'\" % str(error.args)) except ValueError, error: raise ValueError(\"Invalid property value found: '%s'\" %",
"continue if not transformationSucceeded: raise ValueError(\"Cannot restore value '%s' from persistence format.\" %",
"toPersistenceFormat(self, value): \"\"\" Transform the domain object into a dictionary. \"\"\" if not",
"for property type constants. @param value: Python object. @type value: C{object} @return: Property",
"no constant matches the full dotted class name is returned. @see: L{constants<datafinder.core.configuration.properties.constants>} for",
"value): \"\"\" Transform the domain object into a dictionary. \"\"\" if not self._isValid:",
"self = self # silent pylint return persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms",
"except TypeError: raise ConfigurationError(\"Restrictions for property type '%s' are invalid.\" % propertyTypeName) else:",
"to perform concrete validation. @raise ValueError: indicates validation errors. \"\"\" if not value",
"required type '%s'.\" \\ % (str(value), str(self._cls))) try: value.validate() except AttributeError, error: raise",
"True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot transform value '%s'",
"during value transformation. \"\"\" self = self # silent pylint return persistedValue def",
"value '%s' is no valid domain object.\" % str(value)) return result def fromPersistenceFormat(self,",
"the code here. \"\"\" # pylint: disable=W0142 if propertyTypeName in _propertyNameClassMap: try: return",
"silent pylint return persistedValue def toPersistenceFormat(self, value): \"\"\" Transforms the value to the",
"#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\"",
"fit defined domain class '%s'.\" % (self.name, str(persistedValue))) else: setattr(instance, name, value) return",
"disclaimer in the # documentation and/or other materials provided with the # distribution.",
"import base_validators from datafinder.core.configuration.properties.validators import type_validators from datafinder.core.error import ConfigurationError __version__ = \"$Revision-Id:$\"",
"STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT",
"class name) or a class object. @type cls: C{unicode} or class object \"\"\"",
"= self._handleImportError(\"Failed to import class '%s'! Got '%s' instead!\" \\ % (fullDottedClassName, cls.__name__))",
"\"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE] = maximum self.restrictions[constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES] = minDecimalPlaces self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES]",
"correctly loaded or not. \"\"\" return self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\"",
"subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value '%s' is",
"value transformation. \"\"\" self = self # silent pylint return value class StringType(BasePropertyType):",
"value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True break except ValueError: continue if",
"maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType())",
"break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot transform value '%s' to",
"restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] =",
"the transformation for every list item is performed. \"\"\" if not persistedValue is",
"persisted value '%s' is no dictionary.\" % str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue)",
"error if something goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") +",
"validation of property values against defined restrictions and performs transformation of values for",
"prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND",
"may be C{None} or not. @type notNull: C{bool} \"\"\" self.restrictions = dict() self.notNull",
"= maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if allowedSubtypes is None: self._allowedSubtypes = list() self._allowedSubtypes.append(StringType())",
"Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c) 2003-2011, German Aerospace Center",
"the required type '%s'.\" \\ % (str(value), str(self._cls))) result = dict() try: for",
"Provides the supported property types. A property type allows validation of property values",
"type '%s'.\" \\ % (str(value), str(self._cls))) try: value.validate() except AttributeError, error: raise ValueError(\"Cannot",
"\"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH]",
"# Used to allow access to the properties self.representation = str(theDict) class DomainObjectType(BasePropertyType):",
"@raise ValueError: Indicates problems during value transformation. \"\"\" self = self # silent",
"ValueError: Indicates problems during value transformation. \"\"\" self = self # silent pylint",
"AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__ != className: cls = self._handleImportError(\"Failed to",
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" Provides the",
"_propertyNameClassMap.keys()[:] def createPropertyType(propertyTypeName, restrictions=dict()): \"\"\" Factory method for property type creation. @param propertyTypeName:",
"the defined restrictions. Calls C{_validate} to perform concrete validation. @raise ValueError: indicates validation",
"Property type constant. @rtype: C{string} \"\"\" typeDisplayName = None valueType = type(value) for",
"# # * Redistributions in binary form must reproduce the above copyright #",
"theDict # Used to allow access to the properties self.representation = str(theDict) class",
"notNull def validate(self, value): \"\"\" Performs validation of the value against the defined",
"on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES]",
"allow access to the properties self.representation = str(theDict) class DomainObjectType(BasePropertyType): \"\"\" Represents a",
"no dictionary.\" % str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue) try: instance = self._cls()",
"of the property type. @type propertyTypeName: C{unicode} @param restrictions: Map of restriction parameters",
"WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE",
"class could not be found. Please \" \\ + \"correct the configuration.\") if",
"self.restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES] = maxDecimalPlaces self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.NumberValidator(minimum, maximum,",
"subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value transformationSucceeded = True break except ValueError: continue if",
"name, descriptor, subValue in value.walk(): result[name] = descriptor.type.toPersistenceFormat(subValue) except AttributeError: raise ValueError(\"The value",
"= pattern self.restrictions[constants.OPTIONS] = options self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.StringValidator(minimum, maximum, pattern,",
"@see: L{constants<datafinder.core.configuration.properties.constants>} for property type constants. @param value: Python object. @type value: C{object}",
"self._allowedSubtypes = list() self._allowedSubtypes.append(StringType()) self._allowedSubtypes.append(NumberType()) self._allowedSubtypes.append(BooleanType()) self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators",
"typeName, availableTypes in _typeConstantsPythonTypeMap.iteritems(): if valueType in availableTypes: typeDisplayName = typeName break if",
"self.restrictions[constants.OPTIONS_MANDATORY] = optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents",
"name) or a class object. @type cls: C{unicode} or class object \"\"\" BasePropertyType.__init__(self,",
"is no dictionary.\" % str(persistedValue)) if not self._isValid: return UnknownDomainObject(persistedValue) try: instance =",
"CONTRIBUTORS # \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT",
"self._allowedSubtypes.append(DatetimeType()) self._allowedSubtypes.append(DomainObjectType()) else: self._allowedSubtypes = allowedSubtypes subValidators = list() for subtype in self._allowedSubtypes:",
"= fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try: moduleInstance = __import__(fullDottedModuleName, globals(), dict(), [\"\"]) cls =",
"domain object from the given dictionary. \"\"\" if not persistedValue is None: if",
"= True break except ValueError: continue if not transformationSucceeded: raise ValueError(\"Cannot transform value",
"maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list of primitive values. \"\"\" name",
"its contributors may be used to endorse or promote products derived # from",
"= list() for item in value: transformationSucceeded = False for subType in self._allowedSubtypes:",
"__init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details",
"if not persistedValue is None: result = None transformationSucceeded = False for subType",
"def __init__(self, cls=None, notNull=False): \"\"\" Constructor. @param cls: Full dotted class name (consists",
"validate property value. Reason '%s'\" % str(error.args)) except ValueError, error: raise ValueError(\"Invalid property",
"use in source and binary forms, with or without # #modification, are permitted",
"self # silent pylint return value class StringType(BasePropertyType): \"\"\" Represents string values. \"\"\"",
"self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True break except ValueError:",
"All rights reserved. # #Redistribution and use in source and binary forms, with",
"minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date and time values. \"\"\"",
"name = constants.STRING_TYPE def __init__(self, minimum=None, maximum=None, pattern=None, options=None, optionsMandatory=None, notNull=False): \"\"\" @see",
"properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE] = minimum self.restrictions[constants.MAXIMUM_VALUE]",
"into a dictionary. \"\"\" if not self._isValid: raise ValueError(\"The domain class could not",
"Center (DLR) # All rights reserved. # #Redistribution and use in source and",
"of domain object types whose class could not be loaded. \"\"\" # Used",
"self._cls != UnknownDomainObject def _importClass(self, fullDottedClassName): \"\"\" Tries to import the associated class",
"notNull=False): \"\"\" Constructor. \"\"\" BasePropertyType.__init__(self, notNull) if allowedTypes is None: self._allowedTypes = list()",
"cls.__name__) self._cls = cls @property def _isValid(self): \"\"\" Indicates whether the domain class",
"following conditions are #met: # # * Redistributions of source code must retain",
"INDIRECT, INCIDENTAL, #SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #LIMITED TO, PROCUREMENT",
"\"\"\" Performs validation of the value against the defined restrictions. Calls C{_validate} to",
"something goes wrong. \"\"\" fullDottedModuleName = fullDottedClassName[:fullDottedClassName.rfind(\".\")] className = fullDottedClassName[fullDottedClassName.rfind(\".\") + 1:] try:",
"self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes self.restrictions[constants.ALLOWED_SUB_TYPES] = list() subValidators = list() for subtype",
"self._allowedTypes: try: subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded = True break except ValueError: continue",
"keys: C{unicode}, C{object} W0142: Here the */** magic is useful to simplify the",
"(ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__ != className: cls = self._handleImportError(\"Failed",
"class UnknownDomainObject(domain.DomainObject): \"\"\" Used to represent values of domain object types whose class",
"details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum",
"find the concrete class identifier after initialization def __init__(self, cls=None, notNull=False): \"\"\" Constructor.",
"\"\"\" @see L{DatetimeType.__init__<datafinder.core.configuration. properties.validators.type_validators.DatetimeType.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_VALUE]",
"not self._isValid: raise ValueError(\"The domain class could not be found. Please \" \\",
"C{object} W0142: Here the */** magic is useful to simplify the property type",
"TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE",
"domain.DomainObject.__init__(self) self.theDict = theDict # Used to allow access to the properties self.representation",
"retain the above copyright # notice, this list of conditions and the following",
"CAUSED AND ON ANY #THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR",
"to handle failed domain object imports. \"\"\" message = \"Cannot import '%s'. Reason:",
"for subType in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except ValueError:",
"raise ValueError(\"The value '%s' is no valid domain object.\" % str(value)) return result",
"in self._allowedSubtypes: try: subType.validate(item) result.append(subType.toPersistenceFormat(item)) transformationSucceeded = True break except ValueError: continue if",
"\"\"\" Base class for all property types. \"\"\" name = \"\" def __init__(self,",
"for all property types. \"\"\" name = \"\" def __init__(self, notNull): \"\"\" @param",
"\"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.ALLOWED_SUB_TYPES] = list() if",
"self._cls = self._importClass(cls) else: self.name = \"%s.%s\" % (cls.__module__, cls.__name__) self._cls = cls",
"all property types. \"\"\" name = \"\" def __init__(self, notNull): \"\"\" @param notNull:",
"object '%s' using empty constructor.\" % self.name) else: for instance, name, descriptor, value",
"notNull) self.restrictions[constants.MINIMUM_LENGTH] = minimum self.restrictions[constants.MAXIMUM_LENGTH] = maximum self.restrictions[constants.PATTERN] = pattern self.restrictions[constants.OPTIONS] = options",
"@param value: Python object. @type value: C{object} @return: Property type constant. @rtype: C{string}",
"cls: C{unicode} or class object \"\"\" BasePropertyType.__init__(self, notNull) if cls is None: cls",
"ListType(deepcopy(self._allowedSubtypes), self.restrictions[constants.MINIMUM_LENGTH], self.restrictions[constants.MAXIMUM_LENGTH], self.notNull) class AnyType(BasePropertyType): \"\"\" Represents an unspecific property type. \"\"\"",
"className) except (ImportError, AttributeError, ValueError), error: return self._handleImportError(str(error.args)) if cls.__name__ != className: cls",
"list() for item in persistedValue: transformationSucceeded = False for subType in self._allowedSubtypes: try:",
"to import the associated class and raises a configuration error if something goes",
"self._validate = type_validators.NumberValidator(minimum, maximum, minDecimalPlaces, maxDecimalPlaces, options, optionsMandatory) class DatetimeType(BasePropertyType): \"\"\" Represents date",
"ValueError(\"The value '%s' is no valid domain object.\" % str(value)) return result def",
"@see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull) self.restrictions[constants.MINIMUM_LENGTH] =",
"type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list of primitive values. \"\"\"",
"instead!\" \\ % (fullDottedClassName, cls.__name__)) return cls def _handleImportError(self, reason): \"\"\" Common procedure",
"_log = logging.getLogger() class BasePropertyType(object): \"\"\" Base class for all property types. \"\"\"",
"= \"$Revision-Id:$\" _log = logging.getLogger() class BasePropertyType(object): \"\"\" Base class for all property",
"= optionsMandatory self._validate = type_validators.DatetimeValidator(minimum, maximum, options, optionsMandatory) class ListType(BasePropertyType): \"\"\" Represents list",
"False for subType in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result = value",
"self._allowedTypes = list() self._allowedTypes.append(BooleanType()) self._allowedTypes.append(NumberType()) self._allowedTypes.append(DatetimeType()) self._allowedTypes.append(StringType()) self._allowedTypes.append(DomainObjectType()) self._allowedTypes.append(ListType()) else: self._allowedTypes = allowedTypes",
"@rtype: C{string} \"\"\" typeDisplayName = None valueType = type(value) for typeName, availableTypes in",
"= False for subType in self._allowedTypes: try: value = subType.fromPersistenceFormat(persistedValue) subType.validate(value) result =",
"return self._handleImportError(str(error.args)) if cls.__name__ != className: cls = self._handleImportError(\"Failed to import class '%s'!",
"subType.validate(value) result = subType.toPersistenceFormat(value) transformationSucceeded = True break except ValueError: continue if not",
"error: raise ValueError(\"Invalid property value found: '%s'\" % str(error.args)) def toPersistenceFormat(self, value): \"\"\"",
"OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED",
"is None: if self._cls != value.__class__: raise ValueError(\"The value '%s' has not the",
"notNull: C{bool} \"\"\" self.restrictions = dict() self.notNull = notNull def validate(self, value): \"\"\"",
"$Filename$ # $Authors$ # Last Changed: $Date$ $Committer$ $Revision-Id$ # # Copyright (c)",
"#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # \"AS IS\"",
"constants.DATETIME_TYPE: [datetime], constants.LIST_TYPE: [list], constants.NUMBER_TYPE: [int, float, Decimal], constants.STRING_TYPE: [str, unicode]} def determinePropertyTypeConstant(value):",
"else: setattr(instance, name, value) return instance _propertyNameClassMap = {StringType.name: StringType, BooleanType.name: BooleanType, NumberType.name:",
"# All rights reserved. # #Redistribution and use in source and binary forms,",
"minimum=None, maximum=None, notNull=False): \"\"\" @see L{ListType.__init__<datafinder.core.configuration. properties.validators.type_validators.ListType.__init__>} for details on restriction parameters. \"\"\"",
"datetime import datetime from decimal import Decimal import logging from datafinder.core.configuration.properties import constants",
"ValueError(\"The value '%s' has not the required type '%s'.\" \\ % (str(value), str(self._cls)))",
"def _isValid(self): \"\"\" Indicates whether the domain class has been correctly loaded or",
"\"\"\" if not value is None: self._validate(value) else: if self.notNull: raise ValueError(\"Value must",
"value): \"\"\" Ensures that the transformation for every list item is performed. \"\"\"",
"= list() for item in persistedValue: transformationSucceeded = False for subType in self._allowedSubtypes:",
"IN CONTRACT, STRICT LIABILITY, OR TORT #(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY",
"notNull=False): \"\"\" @see L{StringValidator.__init__<datafinder.core.configuration. properties.validators.type_validators.StringValidator.__init__>} for details on restriction parameters. \"\"\" BasePropertyType.__init__(self, notNull)",
"in self._allowedSubtypes: try: value = subType.fromPersistenceFormat(item) subType.validate(value) result.append(value) transformationSucceeded = True break except",
"property values against defined restrictions and performs transformation of values for the persistence",
"IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #LIMITED TO, THE",
"is None: result = None transformationSucceeded = False for subType in self._allowedTypes: try:",
"= constants.BOOLEAN_TYPE def __init__(self, notNull=False): BasePropertyType.__init__(self, notNull) self._validate = type_validators.BooleanValidator() class NumberType(BasePropertyType): \"\"\"",
"copyright # notice, this list of conditions and the following disclaimer in the",
"UnknownDomainObject if isinstance(cls, basestring): self.name = cls self._cls = self._importClass(cls) else: self.name =",
"def _validate(self, value): \"\"\" Template method for concrete validation within a sub class.",
"SHALL THE COPYRIGHT #OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,",
"% str(value)) return result def fromPersistenceFormat(self, persistedValue): \"\"\" Restores the domain object from"
]
[
"6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if",
"On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python",
"# Produced by pysmi-0.3.4 at Wed May 1 14:05:04 2019 # On host",
"\"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\",",
"\"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\",",
"14:05:04 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4",
"\"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention,",
"\"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue",
"MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn,",
"namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\",",
"lanInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1))",
"6, 1, 4, 1, 27662, 200, 1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if",
"mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1, 3, 6, 1,",
"lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200,",
"18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019,",
"# PySNMP MIB module LAN (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by",
"6, 1, 4, 1, 27662, 200, 1, 3, 1, 1)) lanIp = MibScalar((1,",
"1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB",
"= mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32,",
"3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1)) lanIp =",
"1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask =",
"MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1)) lanIp",
"MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\",",
"and type.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3,",
"description = 'Describe the port speed and type.' status = 'current' subtypeSpec =",
"NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\",",
"1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1,",
"\"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity,",
"1, 27662)) productMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200)) generalMib",
"MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200)) generalMib = MibIdentifier((1, 3, 6,",
"mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention, Integer32): description = 'Describe the port",
"1, 4, 1, 27662, 200, 1, 3)) lanInfo = ModuleIdentity((1, 3, 6, 1,",
"Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer",
"status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5,",
"200, 1, 3)) lanInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 27662, 200,",
"\"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3, 6, 1, 4, 1, 27662)) productMib =",
"ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance",
"'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))",
"= mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\",",
"1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7))",
"ModuleIdentity((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1)) if mibBuilder.loadTexts:",
"Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint,",
"ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\",",
"RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3,",
"if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module",
"speed and type.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2,",
"IP address.') lanSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1,",
"1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp, lanSpeed=lanSpeed, lanMib=lanMib, PortSpeedType=PortSpeedType, generalMib=generalMib, productMib=productMib, lanSubnetMask=lanSubnetMask)",
"5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3, 6, 1, 4, 1,",
"MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3)) lanInfo = ModuleIdentity((1,",
"1 14:05:04 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user",
"5, 6, 7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3),",
"1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet",
"3, 6, 1, 4, 1, 27662, 200, 1)) lanMib = MibIdentifier((1, 3, 6,",
"lanIp = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1,",
"IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1, 3,",
"if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1, 3, 6,",
"at Wed May 1 14:05:04 2019 # On host DAVWANG4-M-1475 platform Darwin version",
"mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 27662,",
"mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1, 3, 6, 1,",
"(default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\",",
"3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.')",
"status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp, lanSpeed=lanSpeed, lanMib=lanMib, PortSpeedType=PortSpeedType,",
"SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\",",
"# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using",
"ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup,",
"Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\",",
"# ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\")",
"\"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\",",
"if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention, Integer32): description",
"1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP",
"lanStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1,",
"200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN",
"if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave,",
"MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\",",
"lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200,",
"\"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus,",
"2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus =",
"200)) generalMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1)) lanMib",
"3)) lanInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3,",
"27662, 200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts:",
"PortSpeedType(TextualConvention, Integer32): description = 'Describe the port speed and type.' status = 'current'",
"TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3, 6,",
"by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)",
"\"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3, 6, 1, 4, 1, 27662)) productMib",
"lanSpeed = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1,",
"\"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\")",
"4, 1, 27662, 200, 1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE')",
"= ModuleIdentity((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1)) if",
"= mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\",",
"1, 27662, 200, 1)) lanMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662,",
"1, 27662, 200)) generalMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200,",
"if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1, 3, 6,",
"ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises,",
"= mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3, 6, 1,",
"1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed =",
"Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\")",
"subnet mask.') lanSpeed = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1,",
"DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave =",
"1, 27662, 200, 1, 3, 1, 1)) lanIp = MibScalar((1, 3, 6, 1,",
"3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3,",
"1, 2, 3, 4, 5, 6, 7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1),",
"1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed",
"6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if",
"'Describe the port speed and type.' status = 'current' subtypeSpec = Integer32.subtypeSpec +",
"pysmi-0.3.4 at Wed May 1 14:05:04 2019 # On host DAVWANG4-M-1475 platform Darwin",
"ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance =",
"mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo,",
"# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at Wed May 1 14:05:04",
"= NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5),",
"file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at Wed May 1 14:05:04 2019 # On",
"mask.') lanSpeed = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3,",
"3, 1, 1)) lanIp = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200,",
"7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4),",
"(\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 27662,",
"\"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\")",
"\"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString,",
"Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 =",
"\"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint =",
"3, 6, 1, 4, 1, 27662, 200, 1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z')",
"\"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64,",
"the port speed and type.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0,",
"productMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200)) generalMib = MibIdentifier((1,",
"3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\")",
"\"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress,",
"if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention, Integer32): description = 'Describe the",
"09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\",",
"IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1, 3,",
"\"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\",",
"platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default,",
"lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention, Integer32):",
"4, 1, 27662, 200)) generalMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662,",
"6, 1, 4, 1, 27662, 200)) generalMib = MibIdentifier((1, 3, 6, 1, 4,",
"\"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\",",
"PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\",",
"= MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3)) lanInfo =",
"mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp,",
"4, 1, 27662, 200, 1, 3, 1, 1)) lanIp = MibScalar((1, 3, 6,",
"\"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\",",
"if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo,",
"2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1,",
"version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27",
"Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow,",
"(\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1,",
"NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\",",
"4, 1, 27662, 200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current')",
"200, 1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('')",
"200, 1)) lanMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1,",
"3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\")",
"\"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32,",
"MIB module LAN (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at",
"DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3",
"= MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200)) generalMib = MibIdentifier((1, 3,",
"= mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint,",
"mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso,",
"# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString,",
"= 'Describe the port speed and type.' status = 'current' subtypeSpec = Integer32.subtypeSpec",
"200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN",
"iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\",",
"lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1, 3, 6, 1, 4,",
"Produced by pysmi-0.3.4 at Wed May 1 14:05:04 2019 # On host DAVWANG4-M-1475",
"27662, 200, 1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts:",
"1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if",
"3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.')",
"\"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3, 6, 1, 4, 1,",
"2, 3, 4, 5, 6, 7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\",",
"Wed May 1 14:05:04 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0",
"\"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\",",
"mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\",",
"1, 4, 1, 27662, 200, 1)) lanMib = MibIdentifier((1, 3, 6, 1, 4,",
"4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3, 6, 1,",
"ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection,",
"user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) #",
"+ ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7)) namedValues = NamedValues((\"unknown\", 0),",
"1, 3)) lanInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 27662, 200, 1,",
"2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 #",
"ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType,",
"3, 4, 5, 6, 7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2),",
"ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\",",
"27662, 200, 1)) lanMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200,",
"\"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue =",
"(\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\",",
"\"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\", \"MibTableRow\",",
"subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7)) namedValues",
"enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\",",
"1, 4, 1, 27662)) productMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662,",
"27662, 200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts:",
"for LAN.') class PortSpeedType(TextualConvention, Integer32): description = 'Describe the port speed and type.'",
"3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.')",
"lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1, 3, 6, 1, 4,",
"LAN.') class PortSpeedType(TextualConvention, Integer32): description = 'Describe the port speed and type.' status",
"4, 1, 27662)) productMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200))",
"PySNMP MIB module LAN (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4",
"MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 3),",
"mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint",
"Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar,",
"3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status",
"\"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\",",
"(\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3, 6, 1, 4,",
"(\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus",
"200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN",
"4, 1, 27662, 200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current')",
"27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues,",
"27662, 200)) generalMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1))",
"host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version",
"7)) lanStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3,",
"1, 4, 1, 27662, 200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts:",
"\"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32,",
"port speed and type.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1,",
"\"MibTable\", \"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\",",
"= MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1)) lanMib = MibIdentifier((1,",
"Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\",",
"lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.')",
"1, 4, 1, 27662, 200)) generalMib = MibIdentifier((1, 3, 6, 1, 4, 1,",
"1)) lanMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3))",
"if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class",
"generalMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1)) lanMib =",
"lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus,",
"ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup,",
"module LAN (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at Wed",
"ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at Wed May 1 14:05:04 2019",
"Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar",
"3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\",",
"lanSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1,",
"lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp, lanSpeed=lanSpeed,",
"MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\",",
"MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 1),",
"address.') lanSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3,",
"1, 4, 1, 27662, 200, 1, 3, 1, 1)) lanIp = MibScalar((1, 3,",
"(\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1,",
"1, 4, 1, 27662, 200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts:",
"\"NamedValues\") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\")",
"mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention,",
"1, 27662, 200, 1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if",
"mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for",
"module for LAN.') class PortSpeedType(TextualConvention, Integer32): description = 'Describe the port speed and",
"mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention, Integer32): description =",
"0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6),",
"MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave",
"2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, =",
"= 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6,",
"= mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\",",
"\"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3, 6, 1, 4, 1, 27662))",
"= MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1))",
"\"MibTableRow\", \"MibTableColumn\", \"Counter32\") MacAddress, DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\",",
"lanInfo.setContactInfo('') if mibBuilder.loadTexts: lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention, Integer32): description = 'Describe",
"6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if",
"1, 27662, 200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if",
"1, 3, 1, 1)) lanIp = MibScalar((1, 3, 6, 1, 4, 1, 27662,",
"pepwave = MibIdentifier((1, 3, 6, 1, 4, 1, 27662)) productMib = MibIdentifier((1, 3,",
"MibIdentifier((1, 3, 6, 1, 4, 1, 27662)) productMib = MibIdentifier((1, 3, 6, 1,",
"6, 1, 4, 1, 27662)) productMib = MibIdentifier((1, 3, 6, 1, 4, 1,",
"TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1,",
"mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1, 3, 6, 1, 4, 1, 27662,",
"10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp, lanSpeed=lanSpeed, lanMib=lanMib, PortSpeedType=PortSpeedType, generalMib=generalMib, productMib=productMib,",
"1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/",
"\"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\", \"enterprises\", \"MibScalar\", \"MibTable\",",
"6, 1, 4, 1, 27662, 200, 1, 3)) lanInfo = ModuleIdentity((1, 3, 6,",
"(Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp, lanSpeed=lanSpeed, lanMib=lanMib, PortSpeedType=PortSpeedType, generalMib=generalMib,",
"MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1)) lanMib = MibIdentifier((1, 3,",
"1, 27662, 200, 1, 3)) lanInfo = ModuleIdentity((1, 3, 6, 1, 4, 1,",
"# # PySNMP MIB module LAN (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced",
"version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols(\"ASN1\",",
"3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\")",
"Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7)) namedValues = NamedValues((\"unknown\",",
"Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\",",
"class PortSpeedType(TextualConvention, Integer32): description = 'Describe the port speed and type.' status =",
"4, 1, 27662, 200, 1)) lanMib = MibIdentifier((1, 3, 6, 1, 4, 1,",
"6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200,",
"= Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7)) namedValues =",
"IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\",",
"27662)) productMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200)) generalMib =",
"May 1 14:05:04 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by",
"1)) lanIp = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3,",
"NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier,",
"(\"fullDulplex100\", 4), (\"halfDulplex100\", 5), (\"fullDulplex1000\", 6), (\"halfDulplex1000\", 7)) lanStatus = MibIdentifier((1, 3, 6,",
"\"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks,",
"6, 7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\", 3), (\"fullDulplex100\",",
"3, 6, 1, 4, 1, 27662, 200)) generalMib = MibIdentifier((1, 3, 6, 1,",
"lanMib = MibIdentifier((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3)) lanInfo",
"\"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity,",
"LAN (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at Wed May",
"speed status (Auto/10baseT-FD/ 10baseT-HD/100baseTx-FD/100baseTx-HD/1000baseTx-FD/ 1000baseTx-HD.') mibBuilder.exportSymbols(\"LAN\", pepwave=pepwave, PYSNMP_MODULE_ID=lanInfo, lanStatus=lanStatus, lanInfo=lanInfo, lanIp=lanIp, lanSpeed=lanSpeed, lanMib=lanMib,",
"1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask",
"1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed status (Auto/10baseT-FD/",
"1, 27662, 200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if",
"by pysmi-0.3.4 at Wed May 1 14:05:04 2019 # On host DAVWANG4-M-1475 platform",
"davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier,",
"27662, 200, 1, 3, 1, 1)) lanIp = MibScalar((1, 3, 6, 1, 4,",
"mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\", \"Bits\", \"Integer32\", \"iso\", \"ObjectIdentity\", \"IpAddress\", \"Unsigned32\",",
"1, 27662, 200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current') if",
"MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1, 2),",
"3, 6, 1, 4, 1, 27662)) productMib = MibIdentifier((1, 3, 6, 1, 4,",
"27662, 200, 1, 3)) lanInfo = ModuleIdentity((1, 3, 6, 1, 4, 1, 27662,",
"6, 1, 4, 1, 27662, 200, 1)) lanMib = MibIdentifier((1, 3, 6, 1,",
"Integer32): description = 'Describe the port speed and type.' status = 'current' subtypeSpec",
"if mibBuilder.loadTexts: lanIp.setDescription('LAN IP address.') lanSubnetMask = MibScalar((1, 3, 6, 1, 4, 1,",
"1, 4, 1, 27662, 200, 1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts:",
"1, 4, 1, 27662, 200, 1, 3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts:",
"MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols(\"SNMPv2-SMI\", \"ModuleIdentity\", \"Gauge32\", \"TimeTicks\", \"Counter64\", \"MibIdentifier\", \"NotificationType\",",
"4, 1, 27662, 200, 1, 3, 1, 1, 2), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSubnetMask.setStatus('current')",
"27662, 200, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanIp.setStatus('current') if mibBuilder.loadTexts:",
"NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32",
"mibBuilder.importSymbols(\"SNMPv2-TC\", \"MacAddress\", \"DisplayString\", \"TextualConvention\", \"RowStatus\", \"TruthValue\") pepwave = MibIdentifier((1, 3, 6, 1, 4,",
"TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress, Unsigned32, enterprises, MibScalar, MibTable,",
"\"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits, Integer32, iso, ObjectIdentity, IpAddress,",
"4, 5, 6, 7)) namedValues = NamedValues((\"unknown\", 0), (\"auto\", 1), (\"fullDulplex10\", 2), (\"halfDulplex10\",",
"ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\") ModuleIdentity, Gauge32, TimeTicks, Counter64, MibIdentifier, NotificationType, Bits,",
"type.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4,",
"lanInfo.setDescription('MIB module for LAN.') class PortSpeedType(TextualConvention, Integer32): description = 'Describe the port speed",
"= MibIdentifier((1, 3, 6, 1, 4, 1, 27662)) productMib = MibIdentifier((1, 3, 6,",
"1, 3, 1, 1, 3), PortSpeedType()).setMaxAccess(\"readonly\") if mibBuilder.loadTexts: lanSpeed.setStatus('current') if mibBuilder.loadTexts: lanSpeed.setDescription('LAN speed",
"3, 1)) if mibBuilder.loadTexts: lanInfo.setLastUpdated('201305220000Z') if mibBuilder.loadTexts: lanInfo.setOrganization('PEPWAVE') if mibBuilder.loadTexts: lanInfo.setContactInfo('') if mibBuilder.loadTexts:",
"\"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\", \"ModuleCompliance\")",
"(http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at Wed May 1",
"200, 1, 3, 1, 1)) lanIp = MibScalar((1, 3, 6, 1, 4, 1,",
"if mibBuilder.loadTexts: lanSubnetMask.setDescription('LAN subnet mask.') lanSpeed = MibScalar((1, 3, 6, 1, 4, 1,",
"source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/LAN # Produced by pysmi-0.3.4 at Wed May 1 14:05:04 2019 #",
"= MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1, 3, 1, 1,",
"Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer =",
"mibBuilder.importSymbols(\"ASN1-REFINEMENT\", \"ConstraintsIntersection\", \"ConstraintsUnion\", \"ValueRangeConstraint\", \"ValueSizeConstraint\", \"SingleValueConstraint\") NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols(\"SNMPv2-CONF\", \"NotificationGroup\", \"ObjectGroup\",",
"4, 1, 27662, 200, 1, 3)) lanInfo = ModuleIdentity((1, 3, 6, 1, 4,",
"1, 1)) lanIp = MibScalar((1, 3, 6, 1, 4, 1, 27662, 200, 1,",
"\"TruthValue\") pepwave = MibIdentifier((1, 3, 6, 1, 4, 1, 27662)) productMib = MibIdentifier((1,",
"OctetString, Integer = mibBuilder.importSymbols(\"ASN1\", \"ObjectIdentifier\", \"OctetString\", \"Integer\") NamedValues, = mibBuilder.importSymbols(\"ASN1-ENUMERATION\", \"NamedValues\") ConstraintsIntersection, ConstraintsUnion,",
"3, 6, 1, 4, 1, 27662, 200, 1, 3)) lanInfo = ModuleIdentity((1, 3,"
]
[
"render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login') # ADD USERS def add_users(request): if request.method",
"else: return render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login') # ADD USERS def add_users(request):",
"import User, auth from django.contrib import messages from django.contrib.auth.forms import UserCreationForm from .forms",
"= request.POST['password'] user = auth.authenticate(username=username, password=password) if user is not None: # User",
"return render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login') # ADD USERS def add_users(request): if",
"redirect(\"login\") else: return render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login') # ADD USERS def",
"login_required # Create your views here. def login(request): if request.method == \"POST\": username",
"USERS def add_users(request): if request.method == \"POST\": form = CreateUserForm(request.POST) if form.is_valid(): form.save()",
"auth.logout(request) return redirect('login') # ADD USERS def add_users(request): if request.method == \"POST\": form",
"login(request): if request.method == \"POST\": username = request.POST['username'] password = request.POST['password'] user =",
"not None: # User is authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid credentials\")",
"from django.shortcuts import redirect, render from django.http import HttpResponse from django.contrib.auth.models import User,",
"auth.authenticate(username=username, password=password) if user is not None: # User is authenticated auth.login(request,user) return",
"User is authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\")",
"redirect, render from django.http import HttpResponse from django.contrib.auth.models import User, auth from django.contrib",
"import HttpResponse from django.contrib.auth.models import User, auth from django.contrib import messages from django.contrib.auth.forms",
"# Create your views here. def login(request): if request.method == \"POST\": username =",
"= auth.authenticate(username=username, password=password) if user is not None: # User is authenticated auth.login(request,user)",
"django.contrib.auth.forms import UserCreationForm from .forms import CreateUserForm from django.contrib.auth.decorators import login_required # Create",
"from django.contrib.auth.forms import UserCreationForm from .forms import CreateUserForm from django.contrib.auth.decorators import login_required #",
"if user is not None: # User is authenticated auth.login(request,user) return redirect(\"/\") else:",
"user is not None: # User is authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request,",
"\"invalid credentials\") print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login')",
"HttpResponse from django.contrib.auth.models import User, auth from django.contrib import messages from django.contrib.auth.forms import",
"request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username, password=password) if user is not None:",
"\"POST\": form = CreateUserForm(request.POST) if form.is_valid(): form.save() return redirect('add_users') else: form = CreateUserForm()",
"django.http import HttpResponse from django.contrib.auth.models import User, auth from django.contrib import messages from",
"if request.method == \"POST\": username = request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username,",
"username = request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username, password=password) if user is",
"print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login') # ADD",
"django.shortcuts import redirect, render from django.http import HttpResponse from django.contrib.auth.models import User, auth",
"authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\") else: return",
"return redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\") def",
"credentials\") print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login') #",
"import login_required # Create your views here. def login(request): if request.method == \"POST\":",
"return redirect('login') # ADD USERS def add_users(request): if request.method == \"POST\": form =",
"auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\")",
"password=password) if user is not None: # User is authenticated auth.login(request,user) return redirect(\"/\")",
"is authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\") else:",
"django.contrib.auth.decorators import login_required # Create your views here. def login(request): if request.method ==",
"<reponame>samcodesio/faravdms_active from django.shortcuts import redirect, render from django.http import HttpResponse from django.contrib.auth.models import",
"django.contrib.auth.models import User, auth from django.contrib import messages from django.contrib.auth.forms import UserCreationForm from",
"import messages from django.contrib.auth.forms import UserCreationForm from .forms import CreateUserForm from django.contrib.auth.decorators import",
"auth from django.contrib import messages from django.contrib.auth.forms import UserCreationForm from .forms import CreateUserForm",
"from django.contrib.auth.decorators import login_required # Create your views here. def login(request): if request.method",
"Create your views here. def login(request): if request.method == \"POST\": username = request.POST['username']",
"# ADD USERS def add_users(request): if request.method == \"POST\": form = CreateUserForm(request.POST) if",
"UserCreationForm from .forms import CreateUserForm from django.contrib.auth.decorators import login_required # Create your views",
"== \"POST\": form = CreateUserForm(request.POST) if form.is_valid(): form.save() return redirect('add_users') else: form =",
"user = auth.authenticate(username=username, password=password) if user is not None: # User is authenticated",
"views here. def login(request): if request.method == \"POST\": username = request.POST['username'] password =",
"import CreateUserForm from django.contrib.auth.decorators import login_required # Create your views here. def login(request):",
"None: # User is authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\")",
"from django.contrib import messages from django.contrib.auth.forms import UserCreationForm from .forms import CreateUserForm from",
"import UserCreationForm from .forms import CreateUserForm from django.contrib.auth.decorators import login_required # Create your",
"your views here. def login(request): if request.method == \"POST\": username = request.POST['username'] password",
"ADD USERS def add_users(request): if request.method == \"POST\": form = CreateUserForm(request.POST) if form.is_valid():",
"\"POST\": username = request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username, password=password) if user",
"request.method == \"POST\": username = request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username, password=password)",
"render from django.http import HttpResponse from django.contrib.auth.models import User, auth from django.contrib import",
"from django.http import HttpResponse from django.contrib.auth.models import User, auth from django.contrib import messages",
"messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\") def logout(request): auth.logout(request) return",
"is not None: # User is authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid",
".forms import CreateUserForm from django.contrib.auth.decorators import login_required # Create your views here. def",
"password = request.POST['password'] user = auth.authenticate(username=username, password=password) if user is not None: #",
"request.method == \"POST\": form = CreateUserForm(request.POST) if form.is_valid(): form.save() return redirect('add_users') else: form",
"redirect('login') # ADD USERS def add_users(request): if request.method == \"POST\": form = CreateUserForm(request.POST)",
"User, auth from django.contrib import messages from django.contrib.auth.forms import UserCreationForm from .forms import",
"here. def login(request): if request.method == \"POST\": username = request.POST['username'] password = request.POST['password']",
"else: messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\") def logout(request): auth.logout(request)",
"# User is authenticated auth.login(request,user) return redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\") return",
"= CreateUserForm(request.POST) if form.is_valid(): form.save() return redirect('add_users') else: form = CreateUserForm() return render(request,'add_users.html',{'form':form})",
"logout(request): auth.logout(request) return redirect('login') # ADD USERS def add_users(request): if request.method == \"POST\":",
"import redirect, render from django.http import HttpResponse from django.contrib.auth.models import User, auth from",
"return redirect(\"login\") else: return render(request,\"login.html\") def logout(request): auth.logout(request) return redirect('login') # ADD USERS",
"messages from django.contrib.auth.forms import UserCreationForm from .forms import CreateUserForm from django.contrib.auth.decorators import login_required",
"def logout(request): auth.logout(request) return redirect('login') # ADD USERS def add_users(request): if request.method ==",
"def login(request): if request.method == \"POST\": username = request.POST['username'] password = request.POST['password'] user",
"== \"POST\": username = request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username, password=password) if",
"if request.method == \"POST\": form = CreateUserForm(request.POST) if form.is_valid(): form.save() return redirect('add_users') else:",
"from .forms import CreateUserForm from django.contrib.auth.decorators import login_required # Create your views here.",
"= request.POST['username'] password = request.POST['password'] user = auth.authenticate(username=username, password=password) if user is not",
"django.contrib import messages from django.contrib.auth.forms import UserCreationForm from .forms import CreateUserForm from django.contrib.auth.decorators",
"def add_users(request): if request.method == \"POST\": form = CreateUserForm(request.POST) if form.is_valid(): form.save() return",
"add_users(request): if request.method == \"POST\": form = CreateUserForm(request.POST) if form.is_valid(): form.save() return redirect('add_users')",
"from django.contrib.auth.models import User, auth from django.contrib import messages from django.contrib.auth.forms import UserCreationForm",
"redirect(\"/\") else: messages.info(request, \"invalid credentials\") print(\"Error\") return redirect(\"login\") else: return render(request,\"login.html\") def logout(request):",
"request.POST['password'] user = auth.authenticate(username=username, password=password) if user is not None: # User is",
"CreateUserForm from django.contrib.auth.decorators import login_required # Create your views here. def login(request): if",
"form = CreateUserForm(request.POST) if form.is_valid(): form.save() return redirect('add_users') else: form = CreateUserForm() return"
] |
[
"Excel Sheet Column Number https://leetcode.com/problems/excel-sheet-column-number/description/ Related to question Excel Sheet Column Title Given",
"number. For example: A -> 1 B -> 2 C -> 3 ...",
"-> 27 AB -> 28 \"\"\" class Solution(object): def titleToNumber(self, s): \"\"\" :type",
"Column Number https://leetcode.com/problems/excel-sheet-column-number/description/ Related to question Excel Sheet Column Title Given a column",
"\"\"\" class Solution(object): def titleToNumber(self, s): \"\"\" :type s: str :rtype: int \"\"\"",
"-> 26 AA -> 27 AB -> 28 \"\"\" class Solution(object): def titleToNumber(self,",
"Solution(object): def titleToNumber(self, s): \"\"\" :type s: str :rtype: int \"\"\" column_num =",
"171 Excel Sheet Column Number https://leetcode.com/problems/excel-sheet-column-number/description/ Related to question Excel Sheet Column Title",
"a column title as appear in an Excel sheet, return its corresponding column",
"Related to question Excel Sheet Column Title Given a column title as appear",
"Given a column title as appear in an Excel sheet, return its corresponding",
"-> 3 ... Z -> 26 AA -> 27 AB -> 28 \"\"\"",
"26 AA -> 27 AB -> 28 \"\"\" class Solution(object): def titleToNumber(self, s):",
"A -> 1 B -> 2 C -> 3 ... Z -> 26",
"AB -> 28 \"\"\" class Solution(object): def titleToNumber(self, s): \"\"\" :type s: str",
"\"\"\" :type s: str :rtype: int \"\"\" column_num = 0 for i,letter in",
"corresponding column number. For example: A -> 1 B -> 2 C ->",
"for i,letter in enumerate(s): column_num = 26 * column_num + ord(letter) - 64",
"titleToNumber(self, s): \"\"\" :type s: str :rtype: int \"\"\" column_num = 0 for",
"AA -> 27 AB -> 28 \"\"\" class Solution(object): def titleToNumber(self, s): \"\"\"",
"Z -> 26 AA -> 27 AB -> 28 \"\"\" class Solution(object): def",
"sheet, return its corresponding column number. For example: A -> 1 B ->",
"in an Excel sheet, return its corresponding column number. For example: A ->",
"an Excel sheet, return its corresponding column number. For example: A -> 1",
"https://leetcode.com/problems/excel-sheet-column-number/description/ Related to question Excel Sheet Column Title Given a column title as",
"\"\"\" 171 Excel Sheet Column Number https://leetcode.com/problems/excel-sheet-column-number/description/ Related to question Excel Sheet Column",
"-> 2 C -> 3 ... Z -> 26 AA -> 27 AB",
"s: str :rtype: int \"\"\" column_num = 0 for i,letter in enumerate(s): column_num",
"in enumerate(s): column_num = 26 * column_num + ord(letter) - 64 return column_num",
"str :rtype: int \"\"\" column_num = 0 for i,letter in enumerate(s): column_num =",
":rtype: int \"\"\" column_num = 0 for i,letter in enumerate(s): column_num = 26",
"column number. For example: A -> 1 B -> 2 C -> 3",
"question Excel Sheet Column Title Given a column title as appear in an",
"27 AB -> 28 \"\"\" class Solution(object): def titleToNumber(self, s): \"\"\" :type s:",
"Column Title Given a column title as appear in an Excel sheet, return",
"0 for i,letter in enumerate(s): column_num = 26 * column_num + ord(letter) -",
"i,letter in enumerate(s): column_num = 26 * column_num + ord(letter) - 64 return",
"= 0 for i,letter in enumerate(s): column_num = 26 * column_num + ord(letter)",
"as appear in an Excel sheet, return its corresponding column number. For example:",
"... Z -> 26 AA -> 27 AB -> 28 \"\"\" class Solution(object):",
"column_num = 0 for i,letter in enumerate(s): column_num = 26 * column_num +",
"Sheet Column Number https://leetcode.com/problems/excel-sheet-column-number/description/ Related to question Excel Sheet Column Title Given a",
"-> 28 \"\"\" class Solution(object): def titleToNumber(self, s): \"\"\" :type s: str :rtype:",
"column title as appear in an Excel sheet, return its corresponding column number.",
"1 B -> 2 C -> 3 ... Z -> 26 AA ->",
"example: A -> 1 B -> 2 C -> 3 ... Z ->",
"return its corresponding column number. For example: A -> 1 B -> 2",
"-> 1 B -> 2 C -> 3 ... Z -> 26 AA",
"def titleToNumber(self, s): \"\"\" :type s: str :rtype: int \"\"\" column_num = 0",
"to question Excel Sheet Column Title Given a column title as appear in",
"Excel sheet, return its corresponding column number. For example: A -> 1 B",
"appear in an Excel sheet, return its corresponding column number. For example: A",
"Number https://leetcode.com/problems/excel-sheet-column-number/description/ Related to question Excel Sheet Column Title Given a column title",
"Title Given a column title as appear in an Excel sheet, return its",
"C -> 3 ... Z -> 26 AA -> 27 AB -> 28",
"class Solution(object): def titleToNumber(self, s): \"\"\" :type s: str :rtype: int \"\"\" column_num",
"Excel Sheet Column Title Given a column title as appear in an Excel",
"its corresponding column number. For example: A -> 1 B -> 2 C",
"s): \"\"\" :type s: str :rtype: int \"\"\" column_num = 0 for i,letter",
"B -> 2 C -> 3 ... Z -> 26 AA -> 27",
"int \"\"\" column_num = 0 for i,letter in enumerate(s): column_num = 26 *",
"Sheet Column Title Given a column title as appear in an Excel sheet,",
":type s: str :rtype: int \"\"\" column_num = 0 for i,letter in enumerate(s):",
"title as appear in an Excel sheet, return its corresponding column number. For",
"For example: A -> 1 B -> 2 C -> 3 ... Z",
"3 ... Z -> 26 AA -> 27 AB -> 28 \"\"\" class",
"2 C -> 3 ... Z -> 26 AA -> 27 AB ->",
"\"\"\" column_num = 0 for i,letter in enumerate(s): column_num = 26 * column_num",
"28 \"\"\" class Solution(object): def titleToNumber(self, s): \"\"\" :type s: str :rtype: int"
] |
[
"name self.artist = artist self.album = album def __str__(self): return \"{} by {}",
"QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: # GETTING TRACK &",
"0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self,",
"track_val = requests.get(track_url).json() # GETTING ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val =",
"song(): def __init__(self,name,artist,album): self.name = name self.artist = artist self.album = album def",
"== '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data)",
"musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box =",
"= QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your",
"items to layout & setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn",
"h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS",
"= QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162,",
"except: return False def event_add_song(self): from_who = self.sender().objectName() if from_who == 'add_song_btn': get_input",
"def event_add_song(self): from_who = self.sender().objectName() if from_who == 'add_song_btn': get_input = self.add_song_field.text() song_info",
"List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16,",
"requests import os import urllib.request class song(): def __init__(self,name,artist,album): self.name = name self.artist",
"def get_song_from_lastfm(self,song_name): try: # GETTING TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val",
"setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear()",
"file: for i in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box =",
"song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def",
"layout.takeAt(0) widget = item.widget() if widget is not None: widget.deleteLater() else: self.clearLayout(item.layout()) #",
"self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit()",
"\", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if song_info == False: pass else: track",
"= \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64,",
"song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved !\",",
"if song_info == False: pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2]))",
"= self.sender().objectName() if from_who == 'add_song_btn': get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif",
"os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for i in",
"not None: widget.deleteLater() else: self.clearLayout(item.layout()) # app = QtWidgets.QApplication(sys.argv) # ui = App_Musiclist()",
"== 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info",
"with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for i in current_songs: add_this = song(i[0],i[1],i[2])",
"FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self,",
"self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try:",
"h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout()",
"= QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img and create img #getting url",
"btn = self.sender().objectName() number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName()",
"= QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding",
"LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box =",
"song_info = self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song",
"\"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64,",
"#self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: # GETTING TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format(",
"= QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn =",
"def del_selected_item(self): btn = self.sender().objectName() number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name",
"16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field)",
"self.cursor.fetchall() if len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok)",
"setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current",
"{}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn = 0 def connectdb(self):",
"text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text)",
"self.name = name self.artist = artist self.album = album def __str__(self): return \"{}",
"= self.sender().objectName() number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE",
"del_selected_item(self): btn = self.sender().objectName() number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name =",
"#SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box",
"song_name) track_val = requests.get(track_url).json() # GETTING ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val",
"self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout()",
"img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if",
"event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs = self.cursor.fetchall() if len(current_songs) == 0: msg_box",
"as file: for i in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box",
"len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where",
"sqlite3 import sys import requests import os import urllib.request class song(): def __init__(self,name,artist,album):",
"new_img = QtWidgets.QLabel() #Getting img and create img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0])",
"self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music",
"= QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW",
"self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt)",
"GETTING ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"],",
"= album def __str__(self): return \"{} by {} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget):",
"= requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else:",
"TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\")",
"self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img",
"self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn",
"TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to",
"#STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\")",
"TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list",
"is not None: while layout.count(): item = layout.takeAt(0) widget = item.widget() if widget",
"new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img and create img #getting url track_url =",
"self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn))",
"GETTING TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() # GETTING",
"= requests.get(track_url).json() # GETTING ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json()",
"self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs = self.cursor.fetchall() if len(current_songs) ==",
"Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for i in current_songs:",
"= btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE",
"while layout.count(): item = layout.takeAt(0) widget = item.widget() if widget is not None:",
"QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\")",
"clearLayout(self, layout): if layout is not None: while layout.count(): item = layout.takeAt(0) widget",
"connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE",
"'List Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2)",
"rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song)",
"QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout & setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label)",
"musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save",
"List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file:",
"= sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit()",
"open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for i in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this)",
"song_info except: return False def event_add_song(self): from_who = self.sender().objectName() if from_who == 'add_song_btn':",
"= self.get_song_from_lastfm(text) if song_info == False: pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO",
"def clearLayout(self, layout): if layout is not None: while layout.count(): item = layout.takeAt(0)",
"'': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image))",
"#icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: # GETTING",
"from_who == 'add_song_btn': get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song':",
"self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout): if layout is not",
"self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName() number =",
"from_who == 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\")",
"#Add items to layout & setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box)",
"self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \",",
"urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout & setting to",
"self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon()",
"16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT h_box",
"self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self):",
"if layout is not None: while layout.count(): item = layout.takeAt(0) widget = item.widget()",
"self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn =",
"= QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout & setting to v_box_2 self.new_h_box.addWidget(new_img)",
"List\") def get_song_from_lastfm(self,song_name): try: # GETTING TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name)",
"new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout & setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn)",
"to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def",
"= QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img and",
"self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\")",
"pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album",
"album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except:",
"requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data",
"26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list)",
"!\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM",
"Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn = 0 def",
"#EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal,",
"__init__(self,name,artist,album): self.name = name self.artist = artist self.album = album def __str__(self): return",
"self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName()",
"return song_info except: return False def event_add_song(self): from_who = self.sender().objectName() if from_who ==",
"QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\")",
"def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor()",
"current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !',",
"layout): if layout is not None: while layout.count(): item = layout.takeAt(0) widget =",
"return False def event_add_song(self): from_who = self.sender().objectName() if from_who == 'add_song_btn': get_input =",
"FROM musiclist\") current_songs = self.cursor.fetchall() if len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty",
"self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout): if layout is not None: while layout.count():",
"import os import urllib.request class song(): def __init__(self,name,artist,album): self.name = name self.artist =",
"= item.widget() if widget is not None: widget.deleteLater() else: self.clearLayout(item.layout()) # app =",
"v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self):",
"\"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout)",
"PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name):",
"track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio))",
"is not None: widget.deleteLater() else: self.clearLayout(item.layout()) # app = QtWidgets.QApplication(sys.argv) # ui =",
"elif from_who == 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal,",
"QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon",
"Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting",
"musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\")",
"26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout()",
"i in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List",
"self.countbtn = 0 def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\")",
"Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if song_info == False: pass",
"<reponame>tolgaerdonmez/music-list-lastfm from PyQt5 import QtCore, QtGui, QtWidgets import sqlite3 import sys import requests",
"= song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved",
"None: while layout.count(): item = layout.takeAt(0) widget = item.widget() if widget is not",
"self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName() number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number))",
"QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img and create img #getting url track_url",
"pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout):",
"from_who = self.sender().objectName() if from_who == 'add_song_btn': get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input)",
"musiclist\") def clearLayout(self, layout): if layout is not None: while layout.count(): item =",
"requests.get(track_url).json() # GETTING ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info",
"QtWidgets.QLabel() #Getting img and create img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val =",
"== 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where =",
"if from_who == 'add_song_btn': get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who ==",
"QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img and create",
"0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout): if layout is not None: while",
"file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok)",
"to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for",
"(*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for i in current_songs: add_this",
"= requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False def event_add_song(self):",
"self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)",
"def __init__(self,name,artist,album): self.name = name self.artist = artist self.album = album def __str__(self):",
"urllib.request class song(): def __init__(self,name,artist,album): self.name = name self.artist = artist self.album =",
"= self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self,",
"QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: # GETTING TRACK & ARTIST track_url =",
"QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16,",
"= self.cursor.fetchall() if len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\",",
"# GETTING ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info =",
"#CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\")",
"self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn",
"QtGui, QtWidgets import sqlite3 import sys import requests import os import urllib.request class",
"album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False def",
"Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout()",
"self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon =",
"def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs = self.cursor.fetchall() if len(current_songs) == 0:",
"16, 26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 =",
"Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if song_info == False: pass else:",
"image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout & setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch()",
"?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs = self.cursor.fetchall() if",
"?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for i",
"#WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def",
"== False: pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label",
"= QtWidgets.QLabel() #Getting img and create img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val",
"song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs =",
"+= 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName() number = btn.split(\"|\")[1] layout",
"h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch()",
"else: data = urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout",
"layout is not None: while layout.count(): item = layout.takeAt(0) widget = item.widget() if",
"Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162,",
"FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist",
"new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName() number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject,",
"TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field =",
"= self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name:",
"def setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete",
"| Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn = 0",
"QtWidgets import sqlite3 import sys import requests import os import urllib.request class song():",
"ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]]",
"None: widget.deleteLater() else: self.clearLayout(item.layout()) # app = QtWidgets.QApplication(sys.argv) # ui = App_Musiclist() #",
"layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT",
"artist self.album = album def __str__(self): return \"{} by {} | Album: {}\".format(self.name,self.artist,self.album)",
"and create img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url =",
"Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE",
"INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box",
"requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False def event_add_song(self): from_who",
"= song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets",
"super().__init__() self.setupUi() self.connectdb() self.countbtn = 0 def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass",
"[album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False def event_add_song(self): from_who = self.sender().objectName() if",
"self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt",
"+ '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok) except",
"App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn = 0 def connectdb(self): try: os.remove(\"musiclist.db\")",
"self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song =",
"= name self.artist = artist self.album = album def __str__(self): return \"{} by",
"url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image = QtGui.QImage()",
"FROM musiclist\") def clearLayout(self, layout): if layout is not None: while layout.count(): item",
"self.get_song_from_lastfm(text) if song_info == False: pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist",
"'\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError:",
"event_add_song(self): from_who = self.sender().objectName() if from_who == 'add_song_btn': get_input = self.add_song_field.text() song_info =",
"album def __str__(self): return \"{} by {} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def",
"layout & setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1",
"self.setupUi() self.connectdb() self.countbtn = 0 def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection",
"layout.count(): item = layout.takeAt(0) widget = item.widget() if widget is not None: widget.deleteLater()",
"def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn = 0 def connectdb(self): try: os.remove(\"musiclist.db\") except",
"= 0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout): if layout is not None:",
"if len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else:",
"self.cursor.execute(\"DELETE FROM musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT *",
"= \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return",
"{} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn =",
"QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162,",
"widget.deleteLater() else: self.clearLayout(item.layout()) # app = QtWidgets.QApplication(sys.argv) # ui = App_Musiclist() # sys.exit(app.exec_())",
"self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\")",
"= 0 def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor",
"try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as file: for i in current_songs: add_this =",
"h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song)",
"= ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs = self.cursor.fetchall()",
"self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img and create img",
"TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt =",
"except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\") def",
"= self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name = ?\",(song_name,))",
"import QtCore, QtGui, QtWidgets import sqlite3 import sys import requests import os import",
"#icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: # GETTING TRACK & ARTIST",
"self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName() number",
"save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with",
"26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT h_box =",
"QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: # GETTING TRACK & ARTIST track_url",
"= [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False def event_add_song(self): from_who = self.sender().objectName()",
"Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16,",
"= \"utf-8\") as file: for i in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) +",
"rgb(162, 16, 26);\") #SETTING LAYOUT h_box = QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2",
"QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\")",
"List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\",",
"#getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url",
"= QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500)",
"PyQt5 import QtCore, QtGui, QtWidgets import sqlite3 import sys import requests import os",
"else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0])",
"not None: while layout.count(): item = layout.takeAt(0) widget = item.widget() if widget is",
"self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs = self.cursor.fetchall() if len(current_songs)",
"class song(): def __init__(self,name,artist,album): self.name = name self.artist = artist self.album = album",
"\"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False",
"\"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if song_info == False:",
"rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING LAYOUT",
"FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return",
"self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit()",
"\"Save Your List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\")",
"1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName() number = btn.split(\"|\")[1] layout =",
"item.widget() if widget is not None: widget.deleteLater() else: self.clearLayout(item.layout()) # app = QtWidgets.QApplication(sys.argv)",
"btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name",
"self.sender().objectName() number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM",
"self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color: rgb(162, 16, 26);\") #SETTING",
"False: pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label =",
"List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn')",
"add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List",
"QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding =",
"else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try:",
"new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel() #Getting img and create img #getting",
"image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout & setting to v_box_2",
"FROM musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM",
"QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if song_info ==",
"event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout): if layout",
"to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field",
"QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2)",
"h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list)",
"track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image",
"Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn",
"os import urllib.request class song(): def __init__(self,name,artist,album): self.name = name self.artist = artist",
"track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url == '':",
"song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False def event_add_song(self): from_who =",
"& setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item)",
"except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name",
"\"utf-8\") as file: for i in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n')",
"from PyQt5 import QtCore, QtGui, QtWidgets import sqlite3 import sys import requests import",
"self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list =",
"sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def",
"number = btn.split(\"|\")[1] layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist",
"song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets &",
"if widget is not None: widget.deleteLater() else: self.clearLayout(item.layout()) # app = QtWidgets.QApplication(sys.argv) #",
"self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\")",
"QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if song_info == False: pass else: track =",
"get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text, okPressed =",
"'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info =",
"import urllib.request class song(): def __init__(self,name,artist,album): self.name = name self.artist = artist self.album",
"= layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self):",
"import requests import os import urllib.request class song(): def __init__(self,name,artist,album): self.name = name",
"url = track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data =",
"#Create Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img",
"musiclist\") current_songs = self.cursor.fetchall() if len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!',",
"'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to",
"QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\")",
"QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self):",
"try: # GETTING TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json()",
"sys import requests import os import urllib.request class song(): def __init__(self,name,artist,album): self.name =",
"QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color:",
"0 def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor =",
"current_songs = self.cursor.fetchall() if len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty",
"QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES",
"__init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn = 0 def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError:",
"\"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\", os.getenv(\"HOME\"),\"Text",
"by {} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn",
"class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb() self.countbtn = 0 def connectdb(self): try:",
"return \"{} by {} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi()",
"for i in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self,",
"import sqlite3 import sys import requests import os import urllib.request class song(): def",
"self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt)",
"== 'add_song_btn': get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text,",
"self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color: rgb(162, 16, 26);\") self.add_song.setStyleSheet(\"color:",
"widget is not None: widget.deleteLater() else: self.clearLayout(item.layout()) # app = QtWidgets.QApplication(sys.argv) # ui",
"self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box) #self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"),",
"track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create",
"try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE",
"img and create img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url",
"self.cursor.execute(\"SELECT * FROM musiclist\") current_songs = self.cursor.fetchall() if len(current_songs) == 0: msg_box =",
"self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box) self.v_box.addLayout(self.v_box_2) self.v_box.addStretch() #EVENTS self.add_song.clicked.connect(self.event_add_song) self.delete_list.clicked.connect(self.event_delete_list) self.save_list_txt.clicked.connect(self.event_save_list_to_txt) #WINDOW PROPERTIES self.setLayout(self.v_box)",
"to layout & setting to v_box_2 self.new_h_box.addWidget(new_img) self.new_h_box.addWidget(self.add_label) self.new_h_box.addStretch() self.new_h_box.addWidget(new_del_btn) self.v_box_2.addLayout(self.new_h_box) self.countbtn +=",
"Your List to ?\", os.getenv(\"HOME\"),\"Text Files (*.txt)\") try: with open(save_to_where[0],\"w\",encoding = \"utf-8\") as",
"in current_songs: add_this = song(i[0],i[1],i[2]) file.write(str(add_this) + '\\n') msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved",
"self.countbtn += 1 new_del_btn.clicked.connect(self.del_selected_item) self.add_song_field.clear() def del_selected_item(self): btn = self.sender().objectName() number = btn.split(\"|\")[1]",
"self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn))",
"self.album = album def __str__(self): return \"{} by {} | Album: {}\".format(self.name,self.artist,self.album) class",
"import sys import requests import os import urllib.request class song(): def __init__(self,name,artist,album): self.name",
"= track_val['track']['album']['image'][1]['#text'] if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read()",
"msg_box = QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save",
"= artist self.album = album def __str__(self): return \"{} by {} | Album:",
"#Getting img and create img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json()",
"get_song_from_lastfm(self,song_name): try: # GETTING TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val =",
"WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song",
"layout = self.findChild(QtCore.QObject, \"delhbox|{}\".format(number)) song_name = layout.itemAt(1).widget().objectName() self.cursor.execute(\"DELETE FROM musiclist WHERE song_name =",
"track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() # GETTING ALBUM FROM TRACK album_url",
"item = layout.takeAt(0) widget = item.widget() if widget is not None: widget.deleteLater() else:",
"self.sender().objectName() if from_who == 'add_song_btn': get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who",
"= urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout & setting",
"widget = item.widget() if widget is not None: widget.deleteLater() else: self.clearLayout(item.layout()) # app",
"'add_song_btn': get_input = self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text, okPressed",
"os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist",
"new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add",
"= \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() # GETTING ALBUM FROM TRACK album_url =",
"# GETTING TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() #",
"= QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def",
"!', \"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn =",
"TRACK & ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() # GETTING ALBUM",
"data = urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to layout &",
"= QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE self.save_list_txt.setStyleSheet(\"color: rgb(162, 16, 26);\") self.delete_list.setStyleSheet(\"color:",
"__str__(self): return \"{} by {} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__()",
"List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List to ?\", os.getenv(\"HOME\"),\"Text Files",
"= QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add Song\") self.add_song.setObjectName('add_song_btn') self.add_song_field = QtWidgets.QLineEdit() #STYLE",
"self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track)) self.add_label.setObjectName(song_info[0]) #Create Widgets & Layout",
"song_info == False: pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit()",
"\"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0",
"& ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() # GETTING ALBUM FROM",
"self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS",
"= self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING",
"= QtWidgets.QHBoxLayout() h_box.addWidget(self.add_song) h_box.addWidget(self.add_song_field) h_box.addWidget(self.delete_list) h_box.addWidget(self.save_list_txt) self.v_box_2 = QtWidgets.QVBoxLayout() self.v_box = QtWidgets.QVBoxLayout() self.v_box.addLayout(h_box)",
"url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text'] if url ==",
"= QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if song_info",
"pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT INTO musiclist Values(?,?,?)\",(song_info[0],song_info[1],song_info[2])) self.connection.commit() self.add_label = QtWidgets.QLabel(str(track))",
"& Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img = QtWidgets.QLabel()",
"= QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: # GETTING TRACK",
"\"\") song_info = self.get_song_from_lastfm(text) if song_info == False: pass else: track = song(song_info[0],song_info[1],song_info[2])",
"64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items",
"* FROM musiclist\") current_songs = self.cursor.fetchall() if len(current_songs) == 0: msg_box = QtWidgets.QMessageBox.warning(self,",
"def __str__(self): return \"{} by {} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self):",
"\"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() # GETTING ALBUM FROM TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"])",
"(song_name TEXT,song_artist TEXT,song_album TEXT)\") self.connection.commit() def setupUi(self): #CREATING WIDGETS self.save_list_txt = QtWidgets.QPushButton(\"Save List",
"self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout): if layout is",
"TRACK album_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"artist\"],track_val[\"results\"][\"trackmatches\"][\"track\"][0][\"name\"]) album_val = requests.get(album_url).json() song_info = [album_val[\"track\"][\"name\"], album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info",
"ARTIST track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.search&track={}&api_key=31873cc90c27539710b2e41cf3a7ef24&format=json\".format( song_name) track_val = requests.get(track_url).json() # GETTING ALBUM FROM TRACK",
"QtWidgets.QMessageBox.warning(self, 'Empty List!', \"Empty List!\", QtWidgets.QMessageBox.Ok) else: save_to_where = QtWidgets.QFileDialog.getSaveFileName(self, \"Save Your List",
"QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image = QtGui.QImage() image.loadFromData(data) new_img.setPixmap(QtGui.QPixmap(image)) #Add items to",
"def event_delete_list(self): self.clearLayout(self.v_box_2) self.countbtn = 0 self.cursor.execute(\"DELETE FROM musiclist\") def clearLayout(self, layout): if",
"song_info = self.get_song_from_lastfm(text) if song_info == False: pass else: track = song(song_info[0],song_info[1],song_info[2]) self.cursor.execute(\"INSERT",
"self.connection = sqlite3.connect(\"musiclist.db\") self.cursor = self.connection.cursor() self.cursor.execute(\"CREATE TABLE musiclist (song_name TEXT,song_artist TEXT,song_album TEXT)\")",
"self.artist = artist self.album = album def __str__(self): return \"{} by {} |",
"#self.setGeometry(700,100,750,500) #icon = QtGui.QIcon() #icon.addPixmap(QtGui.QPixmap(\"music.ico\"), QtGui.QIcon.Normal, QtGui.QIcon.Off) #self.setWindowTitle(\"Music List\") def get_song_from_lastfm(self,song_name): try: #",
"= QtWidgets.QPushButton(\"Save List to TXT\") self.delete_list = QtWidgets.QPushButton(\"Delete Current List\") self.add_song = QtWidgets.QPushButton(\"Add",
"if url == '': new_img.setPixmap(QtGui.QPixmap('music.ico').scaled(64, 64, QtCore.Qt.KeepAspectRatio)) else: data = urllib.request.urlopen(url).read() image =",
"album_val[\"track\"][\"artist\"][\"name\"],album_val[\"track\"][\"album\"][\"title\"]] return song_info except: return False def event_add_song(self): from_who = self.sender().objectName() if from_who",
"\"{} by {} | Album: {}\".format(self.name,self.artist,self.album) class App_Musiclist(QtWidgets.QWidget): def __init__(self): super().__init__() self.setupUi() self.connectdb()",
"self.add_song_field.text() song_info = self.get_song_from_lastfm(get_input) elif from_who == 'file_add_song': text, okPressed = QtWidgets.QInputDialog.getText(self, \"Add",
"Widgets & Layout self.new_h_box = QtWidgets.QHBoxLayout() self.new_h_box.setObjectName(\"delhbox|{}\".format(self.countbtn)) new_del_btn = QtWidgets.QPushButton(\"X\") new_del_btn.setObjectName(\"delbtn|{}\".format(self.countbtn)) new_img =",
"False def event_add_song(self): from_who = self.sender().objectName() if from_who == 'add_song_btn': get_input = self.add_song_field.text()",
"WHERE song_name = ?\",(song_name,)) self.connection.commit() self.clearLayout(layout) def event_save_list_to_txt(self): self.cursor.execute(\"SELECT * FROM musiclist\") current_songs",
"self.connectdb() self.countbtn = 0 def connectdb(self): try: os.remove(\"musiclist.db\") except FileNotFoundError: pass self.connection =",
"create img #getting url track_url = \"http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key=31873cc90c27539710b2e41cf3a7ef24&artist={}&track={}&format=json\".format(song_info[1],song_info[0]) track_val = requests.get(track_url).json() url = track_val['track']['album']['image'][1]['#text']",
"msg_box = QtWidgets.QMessageBox.warning(self, 'List Saved !', \"List Saved !\", QtWidgets.QMessageBox.Ok) except FileNotFoundError: pass",
"QtCore, QtGui, QtWidgets import sqlite3 import sys import requests import os import urllib.request",
"okPressed = QtWidgets.QInputDialog.getText(self, \"Add Song\",\"Song Name: \", QtWidgets.QLineEdit.Normal, \"\") song_info = self.get_song_from_lastfm(text) if",
"= layout.takeAt(0) widget = item.widget() if widget is not None: widget.deleteLater() else: self.clearLayout(item.layout())"
] |
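The entry point above is left commented out, as in the original. A minimal runner sketch follows; note that the ui.show() call is an addition (the commented-out lines omit it, and without it the widget never becomes visible):

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    ui = App_Musiclist()
    ui.show()  # added; not in the commented-out original
    sys.exit(app.exec_())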
'''config file'''
MEANS = [0.485, 0.456, 0.406]
STDS = [0.229, 0.224, 0.225]
MAX_JITTER = 32
MAX_ITERS = 50
LEARNING_RATE = 2e-2
NUM_OCTAVES = 6
OCTAVE_SCALE = 1.4
SAVE_INTERVAL = 10
SAVEDIR = 'results'
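MEANS and STDS are the standard ImageNet channel statistics, and the remaining constants describe a multi-octave DeepDream-style schedule. A minimal sketch of how NUM_OCTAVES and OCTAVE_SCALE typically expand into per-octave image sizes (the base resolution of 224 is an assumption; the config itself defines no base size):

base = 224  # hypothetical base resolution, not part of the config
octave_sizes = [round(base * OCTAVE_SCALE ** i) for i in range(NUM_OCTAVES)]
print(octave_sizes)  # [224, 314, 439, 615, 861, 1205]
# Presumably each size gets MAX_ITERS optimization steps at LEARNING_RATE,
# with jitter up to MAX_JITTER pixels and snapshots written to SAVEDIR
# every SAVE_INTERVAL iterations.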
#
# Copyright 2021 Graviti. Licensed under MIT License.
#

"""Notes, DatasetBase, Dataset and FusionDataset.

:class:`Notes` contains the basic information of a :class:`DatasetBase`.

:class:`DatasetBase` defines the basic concept of a dataset, which is the top-level
structure to handle your data files, labels and other additional information.
It represents a whole dataset containing several segments and is the base class of
:class:`Dataset` and :class:`FusionDataset`.

:class:`Dataset` is made up of data collected from only one sensor or data without
sensor information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`.

:class:`FusionDataset` is made up of data collected from multiple sensors.
It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.

"""

import json
from typing import Any, Dict, Iterable, KeysView, Optional, Sequence, Type, TypeVar, Union, overload

from ..label import Catalog
from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads
from .segment import FusionSegment, Segment

_T = TypeVar("_T", FusionSegment, Segment)


class Notes(ReprMixin, EqMixin):
    """This class stores the basic information of a :class:`DatasetBase`.

    Arguments:
        is_continuous: Whether the data inside the dataset is time-continuous.
        bin_point_cloud_fields: The field names of the bin point cloud files in the dataset.

    """

    _T = TypeVar("_T", bound="Notes")

    _repr_attrs = ("is_continuous", "bin_point_cloud_fields")

    def __init__(
        self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None
    ) -> None:
        self.is_continuous = is_continuous
        self.bin_point_cloud_fields = bin_point_cloud_fields

    def __getitem__(self, key: str) -> Any:
        try:
            return getattr(self, key)
        except AttributeError as error:
            raise KeyError(key) from error

    def _loads(self, contents: Dict[str, Any]) -> None:
        self.is_continuous = contents["isContinuous"]
        self.bin_point_cloud_fields = contents.get("binPointCloudFields")

    @classmethod
    def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:
        """Loads a :class:`Notes` instance from the given contents.

        Arguments:
            contents: The given dict containing the dataset notes::

                {
                    "isContinuous":          <boolean>
                    "binPointCloudFields": [ <array> or null
                        <field_name>,        <str>
                        ...
                    ]
                }

        Returns:
            The loaded :class:`Notes` instance.

        """
        return common_loads(cls, contents)

    def keys(self) -> KeysView[str]:
        """Return the valid keys within the notes.

        Returns:
            The valid keys within the notes.

        """
        return KeysView(self._repr_attrs)  # type: ignore[arg-type]

    def dumps(self) -> Dict[str, Any]:
        """Dumps the notes into a dict.

        Returns:
            A dict containing all the information of the Notes::

                {
                    "isContinuous":          <boolean>
                    "binPointCloudFields": [ <array> or null
                        <field_name>,        <str>
                        ...
                    ]
                }

        """
        contents: Dict[str, Any] = {"isContinuous": self.is_continuous}
        if self.bin_point_cloud_fields:
            contents["binPointCloudFields"] = self.bin_point_cloud_fields
        return contents


class DatasetBase(NameMixin, Sequence[_T]):  # pylint: disable=too-many-ancestors
    """This class defines the concept of a basic dataset.

    DatasetBase represents a whole dataset containing several segments and is the
    base class of :class:`Dataset` and :class:`FusionDataset`.

    A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog`
    indicating all the possible values of the labels.

    Arguments:
        name: The name of the dataset.

    Attributes:
        catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset.
        notes: The :class:`Notes` of the dataset.

    """

    _repr_type = ReprType.SEQUENCE

    def __init__(self, name: str) -> None:
        super().__init__(name)
        self._segments: NameSortedList[_T] = NameSortedList()
        self._catalog: Catalog = Catalog()
        self._notes = Notes()

    def __len__(self) -> int:
        return self._segments.__len__()

    @overload
    def __getitem__(self, index: int) -> _T:
        ...

    @overload
    def __getitem__(self, index: slice) -> Sequence[_T]:
        ...

    def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T], _T]:
        return self._segments.__getitem__(index)

    @property
    def catalog(self) -> Catalog:
        """Return the catalog of the dataset.

        Returns:
            The :class:`~tensorbay.label.catalog.Catalog` of the dataset.

        """
        return self._catalog

    @property
    def notes(self) -> Notes:
        """Return the notes of the dataset.

        Returns:
            The :class:`Notes` of the dataset.

        """
        return self._notes

    def load_catalog(self, filepath: str) -> None:
        """Load catalog from a json file.

        Arguments:
            filepath: The path of the json file which contains the catalog information.

        """
        with open(filepath, "r") as fp:
            contents = json.load(fp)
        self._catalog = Catalog.loads(contents)

    def get_segment_by_name(self, name: str) -> _T:
        """Return the segment corresponding to the given name.

        Arguments:
            name: The name of the request segment.

        Returns:
            The segment which matches the input name.

        """
        return self._segments.get_from_name(name)

    def add_segment(self, segment: _T) -> None:
        """Add a segment to the dataset.

        Arguments:
            segment: The segment to be added.

        """
        self._segments.add(segment)


class Dataset(DatasetBase[Segment]):
    """This class defines the concept of dataset.

    Dataset is made up of data collected from only one sensor or data without
    sensor information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`.

    """

    def create_segment(self, segment_name: str = "") -> Segment:
        """Create a segment with the given name.

        Arguments:
            segment_name: The name of the segment to create, whose default value is
                an empty string.

        Returns:
            The created :class:`~tensorbay.dataset.segment.Segment`.

        """
        segment = Segment(segment_name)
        self._segments.add(segment)
        return segment


class FusionDataset(DatasetBase[FusionSegment]):
    """This class defines the concept of fusion dataset.

    FusionDataset is made up of data collected from multiple sensors.
    It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.

    """

    def create_segment(self, segment_name: str = "") -> FusionSegment:
        """Create a fusion segment with the given name.

        Arguments:
            segment_name: The name of the fusion segment to create, whose default
                value is an empty string.

        Returns:
            The created :class:`~tensorbay.dataset.segment.FusionSegment`.

        """
        segment = FusionSegment(segment_name)
        self._segments.add(segment)
        return segment
"dataset. \"\"\" return self._notes def load_catalog(self, filepath: str) -> None: \"\"\"Load catalog from",
"name of the dataset. Attributes: catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset. notes: The",
"of :class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self, segment_name: str = \"\") -> Segment: \"\"\"Create a",
"the notes. Returns: The valid keys within the notes. \"\"\" return KeysView(self._repr_attrs) #",
"indicating all the possible values of the labels. Arguments: name: The name of",
"as fp: contents = json.load(fp) self._catalog = Catalog.loads(contents) def get_segment_by_name(self, name: str) ->",
"fp: contents = json.load(fp) self._catalog = Catalog.loads(contents) def get_segment_by_name(self, name: str) -> _T:",
"str) -> None: super().__init__(name) self._segments: NameSortedList[_T] = NameSortedList() self._catalog: Catalog = Catalog() self._notes",
"self.is_continuous = contents[\"isContinuous\"] self.bin_point_cloud_fields = contents.get(\"binPointCloudFields\") @classmethod def loads(cls: Type[_T], contents: Dict[str, Any])",
"The segment which matches the input name. \"\"\" return self._segments.get_from_name(name) def add_segment(self, segment:",
"a whole dataset contains several segments and is the base class of :class:`Dataset`",
"getattr(self, key) except AttributeError as error: raise KeyError(key) from error def _loads(self, contents:",
"disable=too-many-ancestors \"\"\"This class defines the concept of a basic dataset. DatasetBase represents a",
"def get_segment_by_name(self, name: str) -> _T: \"\"\"Return the segment corresponding to the given",
"create_segment(self, segment_name: str = \"\") -> Segment: \"\"\"Create a segment with the given",
"\"\"\" def create_segment(self, segment_name: str = \"\") -> FusionSegment: \"\"\"Create a fusion segment",
"slice) -> Sequence[_T]: ... def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T], _T]: return",
"{ \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ... ] }",
"basic information of :class:`DatasetBase`. Arguments: is_continuous: Whether the data inside the dataset is",
"possible values of the labels. Arguments: name: The name of the dataset. Attributes:",
"return getattr(self, key) except AttributeError as error: raise KeyError(key) from error def _loads(self,",
"self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents class DatasetBase(NameMixin, Sequence[_T]): # pylint:",
"\"\"\" segment = Segment(segment_name) self._segments.add(segment) return segment class FusionDataset(DatasetBase[FusionSegment]): \"\"\"This class defines the",
"of a :class:`DatasetBase`. :class:`DatasetBase` defines the basic concept of a dataset, which is",
"-> None: self.is_continuous = is_continuous self.bin_point_cloud_fields = bin_point_cloud_fields def __getitem__(self, key: str) ->",
"consists of a list of :class:`~tensorbay.dataset.segment.Segment`. :class:`FusionDataset` is made up of data collected",
"Segment _T = TypeVar(\"_T\", FusionSegment, Segment) class Notes(ReprMixin, EqMixin): \"\"\"This is a class",
"of the dataset. notes: The :class:`Notes` of the dataset. \"\"\" _repr_type = ReprType.SEQUENCE",
"NameSortedList, ReprMixin, ReprType, common_loads from .segment import FusionSegment, Segment _T = TypeVar(\"_T\", FusionSegment,",
"if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors",
"The name of the request segment. Returns: The segment which matches the input",
"created :class:`~tensorbay.dataset.segment.Segment`. \"\"\" segment = Segment(segment_name) self._segments.add(segment) return segment class FusionDataset(DatasetBase[FusionSegment]): \"\"\"This class",
"contents: Dict[str, Any]) -> _T: \"\"\"Loads a :class:`Notes` instance from the given contents.",
"Returns: A dict containing all the information of the Notes:: { \"isContinuous\": <boolean>",
"and other additional information. It represents a whole dataset contains several segments and",
"a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import json from typing import Any, Dict, Iterable,",
"ReprMixin, ReprType, common_loads from .segment import FusionSegment, Segment _T = TypeVar(\"_T\", FusionSegment, Segment)",
"= {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents class DatasetBase(NameMixin, Sequence[_T]):",
"fusion segment with the given name. Arguments: segment_name: The name of the fusion",
"super().__init__(name) self._segments: NameSortedList[_T] = NameSortedList() self._catalog: Catalog = Catalog() self._notes = Notes() def",
"= bin_point_cloud_fields def __getitem__(self, key: str) -> Any: try: return getattr(self, key) except",
"stores the basic information of :class:`DatasetBase`. Arguments: is_continuous: Whether the data inside the",
"def keys(self) -> KeysView[str]: \"\"\"Return the valid keys within the notes. Returns: The",
"def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T], _T]: return self._segments.__getitem__(index) @property def catalog(self)",
"contents: Dict[str, Any]) -> None: self.is_continuous = contents[\"isContinuous\"] self.bin_point_cloud_fields = contents.get(\"binPointCloudFields\") @classmethod def",
"int) -> _T: ... @overload def __getitem__(self, index: slice) -> Sequence[_T]: ... def",
"contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents",
"segment which matches the input name. \"\"\" return self._segments.get_from_name(name) def add_segment(self, segment: _T)",
"the possible values of the labels. Arguments: name: The name of the dataset.",
"__getitem__(self, index: slice) -> Sequence[_T]: ... def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T],",
"the concept of fusion dataset. FusionDataset is made up of data collected from",
"It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import json from typing import",
"Returns: The segment which matches the input name. \"\"\" return self._segments.get_from_name(name) def add_segment(self,",
"and is the base class of :class:`Dataset` and :class:`FusionDataset`. :class:`Dataset` is made up",
"input name. \"\"\" return self._segments.get_from_name(name) def add_segment(self, segment: _T) -> None: \"\"\"Add a",
"Notes(ReprMixin, EqMixin): \"\"\"This is a class stores the basic information of :class:`DatasetBase`. Arguments:",
"all the information of the Notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or",
"slice]) -> Union[Sequence[_T], _T]: return self._segments.__getitem__(index) @property def catalog(self) -> Catalog: \"\"\"Return the",
"DatasetBase represents a whole dataset contains several segments and is the base class",
"dataset. Returns: The class:`Notes` of the dataset. \"\"\" return self._notes def load_catalog(self, filepath:",
"MIT License. # \"\"\"Notes, DatasetBase, Dataset and FusionDataset. :class:`Notes` contains the basic information",
"dataset. FusionDataset is made up of data collected from multiple sensors. It consists",
"the data inside the dataset is time-continuous. bin_point_cloud_fields: The field names of the",
"Dict[str, Any] = {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents class",
"contains several segments and is the base class of :class:`Dataset` and :class:`FusionDataset`. A",
"return contents class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors \"\"\"This class defines the concept",
"the json file which contains the catalog information. \"\"\" with open(filepath, \"r\") as",
"is the top-level structure to handle your data files, labels and other additional",
"name of the fusion segment to create, which default value is an empty",
"Union[Sequence[_T], _T]: return self._segments.__getitem__(index) @property def catalog(self) -> Catalog: \"\"\"Return the catalog of",
"labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating all the possible values of the labels.",
"segment_name: str = \"\") -> FusionSegment: \"\"\"Create a fusion segment with the given",
"} \"\"\" contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields",
"\"\"\" return self._catalog @property def notes(self) -> Notes: \"\"\"Return the notes of the",
":class:`~tensorbay.label.catalog.Catalog` of the dataset. \"\"\" return self._catalog @property def notes(self) -> Notes: \"\"\"Return",
"<boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ... ] } \"\"\" contents:",
"index: Union[int, slice]) -> Union[Sequence[_T], _T]: return self._segments.__getitem__(index) @property def catalog(self) -> Catalog:",
"\"\"\"Notes, DatasetBase, Dataset and FusionDataset. :class:`Notes` contains the basic information of a :class:`DatasetBase`.",
"sensors. It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" def create_segment(self, segment_name: str",
"of the dataset. \"\"\" return self._notes def load_catalog(self, filepath: str) -> None: \"\"\"Load",
"str = \"\") -> FusionSegment: \"\"\"Create a fusion segment with the given name.",
"_T = TypeVar(\"_T\", bound=\"Notes\") _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\") def __init__( self, is_continuous: bool",
"within the notes. \"\"\" return KeysView(self._repr_attrs) # type: ignore[arg-type] def dumps(self) -> Dict[str,",
"or data without sensor information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. \"\"\"",
"notes. Returns: The valid keys within the notes. \"\"\" return KeysView(self._repr_attrs) # type:",
"is_continuous: Whether the data inside the dataset is time-continuous. bin_point_cloud_fields: The field names",
"The valid keys within the notes. \"\"\" return KeysView(self._repr_attrs) # type: ignore[arg-type] def",
"contain a :class:`~tensorbay.label.catalog.Catalog` indicating all the possible values of the labels. Arguments: name:",
"str) -> None: \"\"\"Load catalog from a json file. Arguments: filepath: The path",
"__init__(self, name: str) -> None: super().__init__(name) self._segments: NameSortedList[_T] = NameSortedList() self._catalog: Catalog =",
"collected from multiple sensors. It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import",
"data collected from multiple sensors. It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\"",
":class:`DatasetBase`. Arguments: is_continuous: Whether the data inside the dataset is time-continuous. bin_point_cloud_fields: The",
"up of data collected from multiple sensors. It consists of a list of",
"one sensor or data without sensor information. It consists of a list of",
"<array> or null <field_name>, <str> ... ] } Returns: The loaded :class:`Notes` instance.",
"contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors \"\"\"This class",
"@overload def __getitem__(self, index: slice) -> Sequence[_T]: ... def __getitem__(self, index: Union[int, slice])",
"str = \"\") -> Segment: \"\"\"Create a segment with the given name. Arguments:",
"FusionDataset is made up of data collected from multiple sensors. It consists of",
"= Segment(segment_name) self._segments.add(segment) return segment class FusionDataset(DatasetBase[FusionSegment]): \"\"\"This class defines the concept of",
"of the segment to create, which default value is an empty string. Returns:",
"ReprType.SEQUENCE def __init__(self, name: str) -> None: super().__init__(name) self._segments: NameSortedList[_T] = NameSortedList() self._catalog:",
"-> Notes: \"\"\"Return the notes of the dataset. Returns: The class:`Notes` of the",
"dataset notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ...",
"= Catalog.loads(contents) def get_segment_by_name(self, name: str) -> _T: \"\"\"Return the segment corresponding to",
"up of data collected from only one sensor or data without sensor information.",
"import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads from .segment import FusionSegment, Segment _T",
"create_segment(self, segment_name: str = \"\") -> FusionSegment: \"\"\"Create a fusion segment with the",
"2021 Graviti. Licensed under MIT License. # \"\"\"Notes, DatasetBase, Dataset and FusionDataset. :class:`Notes`",
"import Catalog from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads from .segment",
"request segment. Returns: The segment which matches the input name. \"\"\" return self._segments.get_from_name(name)",
"fusion dataset. FusionDataset is made up of data collected from multiple sensors. It",
"made up of data collected from multiple sensors. It consists of a list",
"class Dataset(DatasetBase[Segment]): \"\"\"This class defines the concept of dataset. Dataset is made up",
"of the dataset. Attributes: catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset. notes: The :class:`Notes`",
"a segment to the dataset. Arguments: segment: The segment to be added. \"\"\"",
"made up of data collected from only one sensor or data without sensor",
"data inside the dataset is time-continuous. bin_point_cloud_fields: The field names of the bin",
"files in the dataset. \"\"\" _T = TypeVar(\"_T\", bound=\"Notes\") _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\")",
"-> Any: try: return getattr(self, key) except AttributeError as error: raise KeyError(key) from",
"from the given contents. Arguments: contents: The given dict containing the dataset notes::",
"load_catalog(self, filepath: str) -> None: \"\"\"Load catalog from a json file. Arguments: filepath:",
"the notes into a dict. Returns: A dict containing all the information of",
":class:`DatasetBase`. :class:`DatasetBase` defines the basic concept of a dataset, which is the top-level",
"segment. Returns: The segment which matches the input name. \"\"\" return self._segments.get_from_name(name) def",
"the dataset. \"\"\" _repr_type = ReprType.SEQUENCE def __init__(self, name: str) -> None: super().__init__(name)",
"dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating all the possible values of",
"data collected from only one sensor or data without sensor information. It consists",
"KeyError(key) from error def _loads(self, contents: Dict[str, Any]) -> None: self.is_continuous = contents[\"isContinuous\"]",
"Sequence[_T]): # pylint: disable=too-many-ancestors \"\"\"This class defines the concept of a basic dataset.",
"without sensor information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self,",
"\"\") -> Segment: \"\"\"Create a segment with the given name. Arguments: segment_name: The",
"defines the basic concept of a dataset, which is the top-level structure to",
"raise KeyError(key) from error def _loads(self, contents: Dict[str, Any]) -> None: self.is_continuous =",
"_T: \"\"\"Return the segment corresponding to the given name. Arguments: name: The name",
"Returns: The loaded :class:`Notes` instance. \"\"\" return common_loads(cls, contents) def keys(self) -> KeysView[str]:",
"Arguments: segment_name: The name of the segment to create, which default value is",
"The given dict containing the dataset notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array>",
"Catalog = Catalog() self._notes = Notes() def __len__(self) -> int: return self._segments.__len__() @overload",
"Catalog.loads(contents) def get_segment_by_name(self, name: str) -> _T: \"\"\"Return the segment corresponding to the",
"catalog from a json file. Arguments: filepath: The path of the json file",
"\"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ... ] } Returns:",
"python3 # # Copyright 2021 Graviti. Licensed under MIT License. # \"\"\"Notes, DatasetBase,",
"data files, labels and other additional information. It represents a whole dataset contains",
"self._catalog = Catalog.loads(contents) def get_segment_by_name(self, name: str) -> _T: \"\"\"Return the segment corresponding",
"Dict, Iterable, KeysView, Optional, Sequence, Type, TypeVar, Union, overload from ..label import Catalog",
"dataset. DatasetBase represents a whole dataset contains several segments and is the base",
":class:`~tensorbay.label.catalog.Catalog` indicating all the possible values of the labels. Arguments: name: The name",
"dataset, which is the top-level structure to handle your data files, labels and",
"_T]: return self._segments.__getitem__(index) @property def catalog(self) -> Catalog: \"\"\"Return the catalog of the",
"Arguments: segment_name: The name of the fusion segment to create, which default value",
"<field_name>, <str> ... ] } Returns: The loaded :class:`Notes` instance. \"\"\" return common_loads(cls,",
"FusionSegment, Segment _T = TypeVar(\"_T\", FusionSegment, Segment) class Notes(ReprMixin, EqMixin): \"\"\"This is a",
"def __len__(self) -> int: return self._segments.__len__() @overload def __getitem__(self, index: int) -> _T:",
"information. \"\"\" with open(filepath, \"r\") as fp: contents = json.load(fp) self._catalog = Catalog.loads(contents)",
"-> _T: ... @overload def __getitem__(self, index: slice) -> Sequence[_T]: ... def __getitem__(self,",
"notes. \"\"\" return KeysView(self._repr_attrs) # type: ignore[arg-type] def dumps(self) -> Dict[str, Any]: \"\"\"Dumps",
"a :class:`~tensorbay.label.catalog.Catalog` indicating all the possible values of the labels. Arguments: name: The",
"given name. Arguments: segment_name: The name of the segment to create, which default",
"NameMixin, NameSortedList, ReprMixin, ReprType, common_loads from .segment import FusionSegment, Segment _T = TypeVar(\"_T\",",
"self.bin_point_cloud_fields return contents class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors \"\"\"This class defines the",
"the dataset. \"\"\" _T = TypeVar(\"_T\", bound=\"Notes\") _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\") def __init__(",
"self._notes = Notes() def __len__(self) -> int: return self._segments.__len__() @overload def __getitem__(self, index:",
"\"\"\"Return the catalog of the dataset. Returns: The :class:`~tensorbay.label.catalog.Catalog` of the dataset. \"\"\"",
"list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import json from typing import Any, Dict, Iterable, KeysView,",
"a class stores the basic information of :class:`DatasetBase`. Arguments: is_continuous: Whether the data",
"fusion segment to create, which default value is an empty string. Returns: The",
"self._segments.add(segment) class Dataset(DatasetBase[Segment]): \"\"\"This class defines the concept of dataset. Dataset is made",
":class:`~tensorbay.dataset.segment.Segment`. :class:`FusionDataset` is made up of data collected from multiple sensors. It consists",
"sensor information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self, segment_name:",
"Catalog from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads from .segment import",
"string. Returns: The created :class:`~tensorbay.dataset.segment.Segment`. \"\"\" segment = Segment(segment_name) self._segments.add(segment) return segment class",
"values of the labels. Arguments: name: The name of the dataset. Attributes: catalog:",
"the basic information of a :class:`DatasetBase`. :class:`DatasetBase` defines the basic concept of a",
"__init__( self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None ) -> None:",
"FusionSegment, Segment) class Notes(ReprMixin, EqMixin): \"\"\"This is a class stores the basic information",
"Sequence[_T]: ... def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T], _T]: return self._segments.__getitem__(index) @property",
"It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. :class:`FusionDataset` is made up of data",
"name. Arguments: segment_name: The name of the segment to create, which default value",
"which contains the catalog information. \"\"\" with open(filepath, \"r\") as fp: contents =",
"information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self, segment_name: str",
"of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import json from typing import Any, Dict,",
"the request segment. Returns: The segment which matches the input name. \"\"\" return",
"the concept of dataset. Dataset is made up of data collected from only",
"basic concept of a dataset, which is the top-level structure to handle your",
"segment with the given name. Arguments: segment_name: The name of the fusion segment",
"of the labels. Arguments: name: The name of the dataset. Attributes: catalog: The",
"given dict containing the dataset notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or",
"segment_name: The name of the segment to create, which default value is an",
"\"\"\" _T = TypeVar(\"_T\", bound=\"Notes\") _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\") def __init__( self, is_continuous:",
"from only one sensor or data without sensor information. It consists of a",
"with labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating all the possible values of the",
"_T: ... @overload def __getitem__(self, index: slice) -> Sequence[_T]: ... def __getitem__(self, index:",
"labels. Arguments: name: The name of the dataset. Attributes: catalog: The :class:`~tensorbay.label.catalog.Catalog` of",
"-> None: \"\"\"Add a segment to the dataset. Arguments: segment: The segment to",
"concept of a basic dataset. DatasetBase represents a whole dataset contains several segments",
"class Notes(ReprMixin, EqMixin): \"\"\"This is a class stores the basic information of :class:`DatasetBase`.",
"of a basic dataset. DatasetBase represents a whole dataset contains several segments and",
"an empty string. Returns: The created :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" segment = FusionSegment(segment_name) self._segments.add(segment) return",
"dataset. Arguments: segment: The segment to be added. \"\"\" self._segments.add(segment) class Dataset(DatasetBase[Segment]): \"\"\"This",
"common_loads(cls, contents) def keys(self) -> KeysView[str]: \"\"\"Return the valid keys within the notes.",
"Any: try: return getattr(self, key) except AttributeError as error: raise KeyError(key) from error",
"the base class of :class:`Dataset` and :class:`FusionDataset`. :class:`Dataset` is made up of data",
"<str> ... ] } Returns: The loaded :class:`Notes` instance. \"\"\" return common_loads(cls, contents)",
"concept of a dataset, which is the top-level structure to handle your data",
"key) except AttributeError as error: raise KeyError(key) from error def _loads(self, contents: Dict[str,",
"without sensor information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. :class:`FusionDataset` is made",
":class:`~tensorbay.label.catalog.Catalog` of the dataset. notes: The :class:`Notes` of the dataset. \"\"\" _repr_type =",
"def load_catalog(self, filepath: str) -> None: \"\"\"Load catalog from a json file. Arguments:",
"of data collected from only one sensor or data without sensor information. It",
"get_segment_by_name(self, name: str) -> _T: \"\"\"Return the segment corresponding to the given name.",
"The :class:`~tensorbay.label.catalog.Catalog` of the dataset. notes: The :class:`Notes` of the dataset. \"\"\" _repr_type",
"self._segments: NameSortedList[_T] = NameSortedList() self._catalog: Catalog = Catalog() self._notes = Notes() def __len__(self)",
"The loaded :class:`Notes` instance. \"\"\" return common_loads(cls, contents) def keys(self) -> KeysView[str]: \"\"\"Return",
"str) -> Any: try: return getattr(self, key) except AttributeError as error: raise KeyError(key)",
"the concept of a basic dataset. DatasetBase represents a whole dataset contains several",
"-> FusionSegment: \"\"\"Create a fusion segment with the given name. Arguments: segment_name: The",
"import Any, Dict, Iterable, KeysView, Optional, Sequence, Type, TypeVar, Union, overload from ..label",
"other additional information. It represents a whole dataset contains several segments and is",
"TypeVar, Union, overload from ..label import Catalog from ..utility import EqMixin, NameMixin, NameSortedList,",
"class:`Notes` of the dataset. \"\"\" return self._notes def load_catalog(self, filepath: str) -> None:",
"\"\"\"Add a segment to the dataset. Arguments: segment: The segment to be added.",
":class:`~tensorbay.dataset.segment.Segment`. \"\"\" segment = Segment(segment_name) self._segments.add(segment) return segment class FusionDataset(DatasetBase[FusionSegment]): \"\"\"This class defines",
"-> _T: \"\"\"Loads a :class:`Notes` instance from the given contents. Arguments: contents: The",
"Licensed under MIT License. # \"\"\"Notes, DatasetBase, Dataset and FusionDataset. :class:`Notes` contains the",
"the dataset is time-continuous. bin_point_cloud_fields: The field names of the bin point cloud",
"a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" def create_segment(self, segment_name: str = \"\") -> FusionSegment:",
"<array> or null <field_name>, <str> ... ] } \"\"\" contents: Dict[str, Any] =",
"consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import json from typing import Any,",
"the segment to create, which default value is an empty string. Returns: The",
"keys within the notes. Returns: The valid keys within the notes. \"\"\" return",
"\"\"\"Create a fusion segment with the given name. Arguments: segment_name: The name of",
"Catalog() self._notes = Notes() def __len__(self) -> int: return self._segments.__len__() @overload def __getitem__(self,",
"TypeVar(\"_T\", bound=\"Notes\") _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\") def __init__( self, is_continuous: bool = False,",
"Returns: The valid keys within the notes. \"\"\" return KeysView(self._repr_attrs) # type: ignore[arg-type]",
"is the base class of :class:`Dataset` and :class:`FusionDataset`. A dataset with labels should",
"from ..label import Catalog from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads",
"and :class:`FusionDataset`. A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating all the",
"instance from the given contents. Arguments: contents: The given dict containing the dataset",
"is the base class of :class:`Dataset` and :class:`FusionDataset`. :class:`Dataset` is made up of",
"segment to be added. \"\"\" self._segments.add(segment) class Dataset(DatasetBase[Segment]): \"\"\"This class defines the concept",
"the given contents. Arguments: contents: The given dict containing the dataset notes:: {",
"add_segment(self, segment: _T) -> None: \"\"\"Add a segment to the dataset. Arguments: segment:",
"notes(self) -> Notes: \"\"\"Return the notes of the dataset. Returns: The class:`Notes` of",
"the basic concept of a dataset, which is the top-level structure to handle",
"consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" def create_segment(self, segment_name: str = \"\")",
"the dataset notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str>",
"# \"\"\"Notes, DatasetBase, Dataset and FusionDataset. :class:`Notes` contains the basic information of a",
"within the notes. Returns: The valid keys within the notes. \"\"\" return KeysView(self._repr_attrs)",
"all the possible values of the labels. Arguments: name: The name of the",
"which default value is an empty string. Returns: The created :class:`~tensorbay.dataset.segment.Segment`. \"\"\" segment",
"contents[\"isContinuous\"] self.bin_point_cloud_fields = contents.get(\"binPointCloudFields\") @classmethod def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:",
"type: ignore[arg-type] def dumps(self) -> Dict[str, Any]: \"\"\"Dumps the notes into a dict.",
"information. It represents a whole dataset contains several segments and is the base",
"sensors. It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import json from typing",
"which is the top-level structure to handle your data files, labels and other",
"the information of the Notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null",
"= \"\") -> FusionSegment: \"\"\"Create a fusion segment with the given name. Arguments:",
"Iterable, KeysView, Optional, Sequence, Type, TypeVar, Union, overload from ..label import Catalog from",
"list of :class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self, segment_name: str = \"\") -> Segment: \"\"\"Create",
"sensor information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. :class:`FusionDataset` is made up",
"the dataset. Arguments: segment: The segment to be added. \"\"\" self._segments.add(segment) class Dataset(DatasetBase[Segment]):",
"..label import Catalog from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads from",
"-> int: return self._segments.__len__() @overload def __getitem__(self, index: int) -> _T: ... @overload",
"index: slice) -> Sequence[_T]: ... def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T], _T]:",
"<reponame>YiweiLi4/tensorbay-python-sdk #!/usr/bin/env python3 # # Copyright 2021 Graviti. Licensed under MIT License. #",
"The field names of the bin point cloud files in the dataset. \"\"\"",
"... @overload def __getitem__(self, index: slice) -> Sequence[_T]: ... def __getitem__(self, index: Union[int,",
"\"\"\"This class defines the concept of dataset. Dataset is made up of data",
":class:`Notes` contains the basic information of a :class:`DatasetBase`. :class:`DatasetBase` defines the basic concept",
"index: int) -> _T: ... @overload def __getitem__(self, index: slice) -> Sequence[_T]: ...",
"\"binPointCloudFields\": [ <array> or null <field_name>, <str> ... ] } Returns: The loaded",
"of the bin point cloud files in the dataset. \"\"\" _T = TypeVar(\"_T\",",
"A dict containing all the information of the Notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\":",
"The path of the json file which contains the catalog information. \"\"\" with",
"class of :class:`Dataset` and :class:`FusionDataset`. A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog`",
":class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" import json from typing import Any, Dict, Iterable, KeysView, Optional, Sequence,",
":class:`Dataset` and :class:`FusionDataset`. A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating all",
"-> Union[Sequence[_T], _T]: return self._segments.__getitem__(index) @property def catalog(self) -> Catalog: \"\"\"Return the catalog",
"null <field_name>, <str> ... ] } \"\"\" contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous}",
"your data files, labels and other additional information. It represents a whole dataset",
"segment class FusionDataset(DatasetBase[FusionSegment]): \"\"\"This class defines the concept of fusion dataset. FusionDataset is",
"a fusion segment with the given name. Arguments: segment_name: The name of the",
"Type[_T], contents: Dict[str, Any]) -> _T: \"\"\"Loads a :class:`Notes` instance from the given",
"of dataset. Dataset is made up of data collected from only one sensor",
"contents) def keys(self) -> KeysView[str]: \"\"\"Return the valid keys within the notes. Returns:",
"information. It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. :class:`FusionDataset` is made up of",
"return KeysView(self._repr_attrs) # type: ignore[arg-type] def dumps(self) -> Dict[str, Any]: \"\"\"Dumps the notes",
"the dataset. notes: The :class:`Notes` of the dataset. \"\"\" _repr_type = ReprType.SEQUENCE def",
"the dataset. \"\"\" return self._catalog @property def notes(self) -> Notes: \"\"\"Return the notes",
"to create, which default value is an empty string. Returns: The created :class:`~tensorbay.dataset.segment.FusionSegment`.",
"create, which default value is an empty string. Returns: The created :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\"",
"segments and is the base class of :class:`Dataset` and :class:`FusionDataset`. :class:`Dataset` is made",
"create, which default value is an empty string. Returns: The created :class:`~tensorbay.dataset.segment.Segment`. \"\"\"",
"segment_name: str = \"\") -> Segment: \"\"\"Create a segment with the given name.",
"catalog(self) -> Catalog: \"\"\"Return the catalog of the dataset. Returns: The :class:`~tensorbay.label.catalog.Catalog` of",
"Type, TypeVar, Union, overload from ..label import Catalog from ..utility import EqMixin, NameMixin,",
"information of the Notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>,",
"time-continuous. bin_point_cloud_fields: The field names of the bin point cloud files in the",
"name: str) -> _T: \"\"\"Return the segment corresponding to the given name. Arguments:",
"Any, Dict, Iterable, KeysView, Optional, Sequence, Type, TypeVar, Union, overload from ..label import",
"def __getitem__(self, index: int) -> _T: ... @overload def __getitem__(self, index: slice) ->",
"the valid keys within the notes. Returns: The valid keys within the notes.",
"# pylint: disable=too-many-ancestors \"\"\"This class defines the concept of a basic dataset. DatasetBase",
"self._segments.__getitem__(index) @property def catalog(self) -> Catalog: \"\"\"Return the catalog of the dataset. Returns:",
"a :class:`Notes` instance from the given contents. Arguments: contents: The given dict containing",
"is a class stores the basic information of :class:`DatasetBase`. Arguments: is_continuous: Whether the",
"... ] } \"\"\" contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"]",
"def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T: \"\"\"Loads a :class:`Notes` instance from",
"name: str) -> None: super().__init__(name) self._segments: NameSortedList[_T] = NameSortedList() self._catalog: Catalog = Catalog()",
"a basic dataset. DatasetBase represents a whole dataset contains several segments and is",
"contents. Arguments: contents: The given dict containing the dataset notes:: { \"isContinuous\": <boolean>",
"\"\"\"This class defines the concept of fusion dataset. FusionDataset is made up of",
"bin_point_cloud_fields: Optional[Iterable[str]] = None ) -> None: self.is_continuous = is_continuous self.bin_point_cloud_fields = bin_point_cloud_fields",
"empty string. Returns: The created :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" segment = FusionSegment(segment_name) self._segments.add(segment) return segment",
"dataset is time-continuous. bin_point_cloud_fields: The field names of the bin point cloud files",
"bin point cloud files in the dataset. \"\"\" _T = TypeVar(\"_T\", bound=\"Notes\") _repr_attrs",
"instance. \"\"\" return common_loads(cls, contents) def keys(self) -> KeysView[str]: \"\"\"Return the valid keys",
"\"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ... ] } \"\"\"",
"keys(self) -> KeysView[str]: \"\"\"Return the valid keys within the notes. Returns: The valid",
"matches the input name. \"\"\" return self._segments.get_from_name(name) def add_segment(self, segment: _T) -> None:",
"a segment with the given name. Arguments: segment_name: The name of the segment",
"the notes. \"\"\" return KeysView(self._repr_attrs) # type: ignore[arg-type] def dumps(self) -> Dict[str, Any]:",
"from a json file. Arguments: filepath: The path of the json file which",
"The name of the fusion segment to create, which default value is an",
"of the dataset. \"\"\" _repr_type = ReprType.SEQUENCE def __init__(self, name: str) -> None:",
"FusionDataset(DatasetBase[FusionSegment]): \"\"\"This class defines the concept of fusion dataset. FusionDataset is made up",
"notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ... ]",
"defines the concept of fusion dataset. FusionDataset is made up of data collected",
"to handle your data files, labels and other additional information. It represents a",
"import FusionSegment, Segment _T = TypeVar(\"_T\", FusionSegment, Segment) class Notes(ReprMixin, EqMixin): \"\"\"This is",
"Notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ... ]",
"the top-level structure to handle your data files, labels and other additional information.",
"self.bin_point_cloud_fields = bin_point_cloud_fields def __getitem__(self, key: str) -> Any: try: return getattr(self, key)",
"= contents.get(\"binPointCloudFields\") @classmethod def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T: \"\"\"Loads a",
"the input name. \"\"\" return self._segments.get_from_name(name) def add_segment(self, segment: _T) -> None: \"\"\"Add",
"A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating all the possible values",
"of the dataset. Returns: The class:`Notes` of the dataset. \"\"\" return self._notes def",
"empty string. Returns: The created :class:`~tensorbay.dataset.segment.Segment`. \"\"\" segment = Segment(segment_name) self._segments.add(segment) return segment",
"several segments and is the base class of :class:`Dataset` and :class:`FusionDataset`. :class:`Dataset` is",
"return common_loads(cls, contents) def keys(self) -> KeysView[str]: \"\"\"Return the valid keys within the",
":class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self, segment_name: str = \"\") -> Segment: \"\"\"Create a segment",
"self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors \"\"\"This",
"segment to the dataset. Arguments: segment: The segment to be added. \"\"\" self._segments.add(segment)",
"a dict. Returns: A dict containing all the information of the Notes:: {",
"\"\"\" _repr_type = ReprType.SEQUENCE def __init__(self, name: str) -> None: super().__init__(name) self._segments: NameSortedList[_T]",
"class stores the basic information of :class:`DatasetBase`. Arguments: is_continuous: Whether the data inside",
"def __init__(self, name: str) -> None: super().__init__(name) self._segments: NameSortedList[_T] = NameSortedList() self._catalog: Catalog",
"valid keys within the notes. \"\"\" return KeysView(self._repr_attrs) # type: ignore[arg-type] def dumps(self)",
"key: str) -> Any: try: return getattr(self, key) except AttributeError as error: raise",
"return self._segments.__len__() @overload def __getitem__(self, index: int) -> _T: ... @overload def __getitem__(self,",
"= TypeVar(\"_T\", FusionSegment, Segment) class Notes(ReprMixin, EqMixin): \"\"\"This is a class stores the",
"Any]) -> _T: \"\"\"Loads a :class:`Notes` instance from the given contents. Arguments: contents:",
"\"\"\"Loads a :class:`Notes` instance from the given contents. Arguments: contents: The given dict",
"contains the basic information of a :class:`DatasetBase`. :class:`DatasetBase` defines the basic concept of",
"is time-continuous. bin_point_cloud_fields: The field names of the bin point cloud files in",
"self._segments.get_from_name(name) def add_segment(self, segment: _T) -> None: \"\"\"Add a segment to the dataset.",
"as error: raise KeyError(key) from error def _loads(self, contents: Dict[str, Any]) -> None:",
"consists of a list of :class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self, segment_name: str = \"\")",
"from multiple sensors. It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" def create_segment(self,",
"import json from typing import Any, Dict, Iterable, KeysView, Optional, Sequence, Type, TypeVar,",
"Arguments: is_continuous: Whether the data inside the dataset is time-continuous. bin_point_cloud_fields: The field",
"self._catalog @property def notes(self) -> Notes: \"\"\"Return the notes of the dataset. Returns:",
"name: The name of the request segment. Returns: The segment which matches the",
"Notes() def __len__(self) -> int: return self._segments.__len__() @overload def __getitem__(self, index: int) ->",
"None: \"\"\"Load catalog from a json file. Arguments: filepath: The path of the",
"# # Copyright 2021 Graviti. Licensed under MIT License. # \"\"\"Notes, DatasetBase, Dataset",
"Arguments: segment: The segment to be added. \"\"\" self._segments.add(segment) class Dataset(DatasetBase[Segment]): \"\"\"This class",
"{\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return contents class DatasetBase(NameMixin, Sequence[_T]): #",
"with the given name. Arguments: segment_name: The name of the fusion segment to",
"segments and is the base class of :class:`Dataset` and :class:`FusionDataset`. A dataset with",
"dataset. notes: The :class:`Notes` of the dataset. \"\"\" _repr_type = ReprType.SEQUENCE def __init__(self,",
"typing import Any, Dict, Iterable, KeysView, Optional, Sequence, Type, TypeVar, Union, overload from",
"notes: The :class:`Notes` of the dataset. \"\"\" _repr_type = ReprType.SEQUENCE def __init__(self, name:",
"\"\"\" contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields return",
"\"bin_point_cloud_fields\") def __init__( self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None )",
"or null <field_name>, <str> ... ] } Returns: The loaded :class:`Notes` instance. \"\"\"",
"Any]: \"\"\"Dumps the notes into a dict. Returns: A dict containing all the",
"added. \"\"\" self._segments.add(segment) class Dataset(DatasetBase[Segment]): \"\"\"This class defines the concept of dataset. Dataset",
"Dict[str, Any]) -> _T: \"\"\"Loads a :class:`Notes` instance from the given contents. Arguments:",
"additional information. It represents a whole dataset contains several segments and is the",
"Union, overload from ..label import Catalog from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin,",
"= Catalog() self._notes = Notes() def __len__(self) -> int: return self._segments.__len__() @overload def",
"bin_point_cloud_fields def __getitem__(self, key: str) -> Any: try: return getattr(self, key) except AttributeError",
"segment_name: The name of the fusion segment to create, which default value is",
"@property def catalog(self) -> Catalog: \"\"\"Return the catalog of the dataset. Returns: The",
"catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset. notes: The :class:`Notes` of the dataset. \"\"\"",
"class defines the concept of fusion dataset. FusionDataset is made up of data",
"It consists of a list of :class:`~tensorbay.dataset.segment.Segment`. \"\"\" def create_segment(self, segment_name: str =",
"bin_point_cloud_fields: The field names of the bin point cloud files in the dataset.",
"structure to handle your data files, labels and other additional information. It represents",
"... def __getitem__(self, index: Union[int, slice]) -> Union[Sequence[_T], _T]: return self._segments.__getitem__(index) @property def",
"is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None ) -> None: self.is_continuous =",
"TypeVar(\"_T\", FusionSegment, Segment) class Notes(ReprMixin, EqMixin): \"\"\"This is a class stores the basic",
"class defines the concept of a basic dataset. DatasetBase represents a whole dataset",
"collected from multiple sensors. It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" def",
"int: return self._segments.__len__() @overload def __getitem__(self, index: int) -> _T: ... @overload def",
"of :class:`Dataset` and :class:`FusionDataset`. A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating",
"error: raise KeyError(key) from error def _loads(self, contents: Dict[str, Any]) -> None: self.is_continuous",
"\"\"\"Return the valid keys within the notes. Returns: The valid keys within the",
"in the dataset. \"\"\" _T = TypeVar(\"_T\", bound=\"Notes\") _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\") def",
"of the json file which contains the catalog information. \"\"\" with open(filepath, \"r\")",
"= contents[\"isContinuous\"] self.bin_point_cloud_fields = contents.get(\"binPointCloudFields\") @classmethod def loads(cls: Type[_T], contents: Dict[str, Any]) ->",
"segment: _T) -> None: \"\"\"Add a segment to the dataset. Arguments: segment: The",
"contents = json.load(fp) self._catalog = Catalog.loads(contents) def get_segment_by_name(self, name: str) -> _T: \"\"\"Return",
"and FusionDataset. :class:`Notes` contains the basic information of a :class:`DatasetBase`. :class:`DatasetBase` defines the",
":class:`DatasetBase` defines the basic concept of a dataset, which is the top-level structure",
"dict containing the dataset notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null",
"Returns: The :class:`~tensorbay.label.catalog.Catalog` of the dataset. \"\"\" return self._catalog @property def notes(self) ->",
"\"\"\"This class defines the concept of a basic dataset. DatasetBase represents a whole",
"__len__(self) -> int: return self._segments.__len__() @overload def __getitem__(self, index: int) -> _T: ...",
"the catalog information. \"\"\" with open(filepath, \"r\") as fp: contents = json.load(fp) self._catalog",
"of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" def create_segment(self, segment_name: str = \"\") ->",
"concept of dataset. Dataset is made up of data collected from only one",
"of the dataset. \"\"\" return self._catalog @property def notes(self) -> Notes: \"\"\"Return the",
"dataset. Attributes: catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset. notes: The :class:`Notes` of the",
"# Copyright 2021 Graviti. Licensed under MIT License. # \"\"\"Notes, DatasetBase, Dataset and",
"<str> ... ] } \"\"\" contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields:",
"Segment: \"\"\"Create a segment with the given name. Arguments: segment_name: The name of",
"class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors \"\"\"This class defines the concept of a",
"value is an empty string. Returns: The created :class:`~tensorbay.dataset.segment.Segment`. \"\"\" segment = Segment(segment_name)",
"License. # \"\"\"Notes, DatasetBase, Dataset and FusionDataset. :class:`Notes` contains the basic information of",
"from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType, common_loads from .segment import FusionSegment,",
"contents class DatasetBase(NameMixin, Sequence[_T]): # pylint: disable=too-many-ancestors \"\"\"This class defines the concept of",
"given name. Arguments: segment_name: The name of the fusion segment to create, which",
"Sequence, Type, TypeVar, Union, overload from ..label import Catalog from ..utility import EqMixin,",
"= (\"is_continuous\", \"bin_point_cloud_fields\") def __init__( self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] =",
"... ] } Returns: The loaded :class:`Notes` instance. \"\"\" return common_loads(cls, contents) def",
":class:`FusionDataset`. A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog` indicating all the possible",
"-> Dict[str, Any]: \"\"\"Dumps the notes into a dict. Returns: A dict containing",
"of :class:`DatasetBase`. Arguments: is_continuous: Whether the data inside the dataset is time-continuous. bin_point_cloud_fields:",
"the Notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\": [ <array> or null <field_name>, <str> ...",
"from .segment import FusionSegment, Segment _T = TypeVar(\"_T\", FusionSegment, Segment) class Notes(ReprMixin, EqMixin):",
"[ <array> or null <field_name>, <str> ... ] } Returns: The loaded :class:`Notes`",
"AttributeError as error: raise KeyError(key) from error def _loads(self, contents: Dict[str, Any]) ->",
"several segments and is the base class of :class:`Dataset` and :class:`FusionDataset`. A dataset",
"= Notes() def __len__(self) -> int: return self._segments.__len__() @overload def __getitem__(self, index: int)",
"labels and other additional information. It represents a whole dataset contains several segments",
"is an empty string. Returns: The created :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" segment = FusionSegment(segment_name) self._segments.add(segment)",
"@property def notes(self) -> Notes: \"\"\"Return the notes of the dataset. Returns: The",
"json file which contains the catalog information. \"\"\" with open(filepath, \"r\") as fp:",
"name. \"\"\" return self._segments.get_from_name(name) def add_segment(self, segment: _T) -> None: \"\"\"Add a segment",
"def dumps(self) -> Dict[str, Any]: \"\"\"Dumps the notes into a dict. Returns: A",
"self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None ) -> None: self.is_continuous",
"Dataset(DatasetBase[Segment]): \"\"\"This class defines the concept of dataset. Dataset is made up of",
"\"\"\" import json from typing import Any, Dict, Iterable, KeysView, Optional, Sequence, Type,",
"#!/usr/bin/env python3 # # Copyright 2021 Graviti. Licensed under MIT License. # \"\"\"Notes,",
"# type: ignore[arg-type] def dumps(self) -> Dict[str, Any]: \"\"\"Dumps the notes into a",
"-> None: self.is_continuous = contents[\"isContinuous\"] self.bin_point_cloud_fields = contents.get(\"binPointCloudFields\") @classmethod def loads(cls: Type[_T], contents:",
"ignore[arg-type] def dumps(self) -> Dict[str, Any]: \"\"\"Dumps the notes into a dict. Returns:",
"None: super().__init__(name) self._segments: NameSortedList[_T] = NameSortedList() self._catalog: Catalog = Catalog() self._notes = Notes()",
"filepath: The path of the json file which contains the catalog information. \"\"\"",
"cloud files in the dataset. \"\"\" _T = TypeVar(\"_T\", bound=\"Notes\") _repr_attrs = (\"is_continuous\",",
"] } \"\"\" contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous} if self.bin_point_cloud_fields: contents[\"binPointCloudFields\"] =",
"whole dataset contains several segments and is the base class of :class:`Dataset` and",
"dict. Returns: A dict containing all the information of the Notes:: { \"isContinuous\":",
"Arguments: contents: The given dict containing the dataset notes:: { \"isContinuous\": <boolean> \"binPointCloudFields\":",
"filepath: str) -> None: \"\"\"Load catalog from a json file. Arguments: filepath: The",
"multiple sensors. It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`. \"\"\" def create_segment(self, segment_name:",
"NameSortedList() self._catalog: Catalog = Catalog() self._notes = Notes() def __len__(self) -> int: return",
"\"\"\" return self._notes def load_catalog(self, filepath: str) -> None: \"\"\"Load catalog from a",
"null <field_name>, <str> ... ] } Returns: The loaded :class:`Notes` instance. \"\"\" return",
"inside the dataset is time-continuous. bin_point_cloud_fields: The field names of the bin point",
"the dataset. Attributes: catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset. notes: The :class:`Notes` of",
"loaded :class:`Notes` instance. \"\"\" return common_loads(cls, contents) def keys(self) -> KeysView[str]: \"\"\"Return the",
"def create_segment(self, segment_name: str = \"\") -> Segment: \"\"\"Create a segment with the",
"<field_name>, <str> ... ] } \"\"\" contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous} if",
"The name of the segment to create, which default value is an empty",
"self._segments.add(segment) return segment class FusionDataset(DatasetBase[FusionSegment]): \"\"\"This class defines the concept of fusion dataset.",
"into a dict. Returns: A dict containing all the information of the Notes::",
"-> Catalog: \"\"\"Return the catalog of the dataset. Returns: The :class:`~tensorbay.label.catalog.Catalog` of the",
"path of the json file which contains the catalog information. \"\"\" with open(filepath,",
"to create, which default value is an empty string. Returns: The created :class:`~tensorbay.dataset.segment.Segment`.",
"the base class of :class:`Dataset` and :class:`FusionDataset`. A dataset with labels should contain",
"FusionDataset. :class:`Notes` contains the basic information of a :class:`DatasetBase`. :class:`DatasetBase` defines the basic",
"information of a :class:`DatasetBase`. :class:`DatasetBase` defines the basic concept of a dataset, which",
"overload from ..label import Catalog from ..utility import EqMixin, NameMixin, NameSortedList, ReprMixin, ReprType,",
"str) -> _T: \"\"\"Return the segment corresponding to the given name. Arguments: name:",
"The created :class:`~tensorbay.dataset.segment.Segment`. \"\"\" segment = Segment(segment_name) self._segments.add(segment) return segment class FusionDataset(DatasetBase[FusionSegment]): \"\"\"This",
"def create_segment(self, segment_name: str = \"\") -> FusionSegment: \"\"\"Create a fusion segment with",
"] } Returns: The loaded :class:`Notes` instance. \"\"\" return common_loads(cls, contents) def keys(self)",
"KeysView[str]: \"\"\"Return the valid keys within the notes. Returns: The valid keys within"
]
[
"import islice from typing import List, Optional, Iterable import attr from attr.validators import",
"match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location",
"data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] =",
"not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService:",
"def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS):",
"List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE",
"len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) ->",
"from osdu_commons.utils import convert from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import list_of logger",
"GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item,",
"get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod",
"@staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources =",
"DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location from osdu_commons.utils import convert",
"components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for component_id",
"logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists:",
"= 100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client def get_resources(self, resource_ids: Iterable[SRN])",
"from_json(cls, json_object, credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod",
"import time from functools import partial from itertools import islice from typing import",
"logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2",
"SRN, component_type: str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item",
"converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] =",
"GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result",
"while len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self,",
"= attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object, credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'),",
"yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]:",
"DeliveredResponse: result = get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for",
"osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5",
"= [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn",
"res_item in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound,",
"delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt') if",
"DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse:",
"GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location from osdu_commons.utils import convert from",
"pampy import match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws",
"-> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return get_resources_result[0] def check_if_resources_exist(self,",
"import partial from itertools import islice from typing import List, Optional, Iterable import",
"DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns] unprocessed_srn",
"Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids)",
"for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for component_id in components_ids if",
"s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return cls(",
"functools import partial from itertools import islice from typing import List, Optional, Iterable",
"osdu_commons.model.aws import S3Location from osdu_commons.utils import convert from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators",
"100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client def get_resources(self, resource_ids: Iterable[SRN]) ->",
"get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response",
"= self.get_resources(resource_ids) return all(resource.exists for resource in resources) def get_components_of_type(self, resource_id: SRN, component_type:",
"self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) ==",
"from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, )",
"Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)),",
"delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after",
"break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1:",
"= list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch),",
"DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client =",
"srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response =",
"srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch =",
"get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result] return DeliveredResponse( delivery_resources=delivery_resources,",
"delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if",
"unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def",
"component_id in components_ids if component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id:",
"self.get_resources(resource_ids) return all(resource.exists for resource in resources) def get_components_of_type(self, resource_id: SRN, component_type: str)",
") @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources",
"import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location from osdu_commons.utils import",
"get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse:",
"osdu_commons.utils import convert from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import list_of logger =",
"class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client def",
"DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result =",
"0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) ->",
"s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource]",
"get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) -",
"from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break",
"SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None)",
"GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class",
"resources = self.get_resources(resource_ids) return all(resource.exists for resource in resources) def get_components_of_type(self, resource_id: SRN,",
"resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response,",
"return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource]",
"return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result",
"instance_of, optional from pampy import match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound,",
"GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for",
"resource_id: SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return get_resources_result[0]",
"get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type)) assert len(get_components_of_type_result)",
"= get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in",
"logging import time from functools import partial from itertools import islice from typing",
"= list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for",
"srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn",
"from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch",
"= iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch)",
"yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) == 0:",
"def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids) return all(resource.exists for resource",
"from osdu_commons.model.aws import S3Location from osdu_commons.utils import convert from osdu_commons.utils.srn import SRN from",
"json_object, credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def",
"from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn}",
"return all(resource.exists for resource in resources) def get_components_of_type(self, resource_id: SRN, component_type: str) ->",
"assert len(get_resources_result) == 1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources",
"= attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass",
"attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client: DeliveryClient):",
"import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource: srn: SRN",
"get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result]",
"import match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import",
"logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN),",
"get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item)",
"for res_item in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response:",
"list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource: srn: SRN =",
"0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS -",
"return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess)",
"unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self,",
"class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client",
"attr from attr.validators import instance_of, optional from pampy import match from osdu_commons.clients.delivery_client import",
"in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch =",
"== 1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids)",
"item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for component_id in components_ids if component_id.type",
"in resources) def get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id)",
"resource_ids: Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids) return all(resource.exists for resource in resources)",
"from itertools import islice from typing import List, Optional, Iterable import attr from",
"def __init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]:",
"components_ids if component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type:",
"- set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN) ->",
"List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object):",
"component_type: str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item in",
"in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch:",
"optional from pampy import match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem",
"delivery_client: DeliveryClient): self._delivery_client = delivery_client def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids =",
"if component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type: str)",
"== 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS",
"Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids) return all(resource.exists for resource in resources) def",
"class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] =",
"resource_id: SRN, component_type: str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type)) assert len(get_components_of_type_result) ==",
"import SRN from osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True)",
"default=None) @classmethod def from_json(cls, json_object, credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials,",
"GetResourcesResultItem from osdu_commons.model.aws import S3Location from osdu_commons.utils import convert from osdu_commons.utils.srn import SRN",
"List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in",
"<gh_stars>0 import logging import time from functools import partial from itertools import islice",
"[SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for component_id in components_ids",
"get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids) return all(resource.exists for",
"-> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type",
"str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']]",
"component_type: str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type)) assert len(get_components_of_type_result) == 1 return",
"delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[],",
"= list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) ->",
"component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type: str) ->",
"[DeliveredResource(srn=srn, exists=False) for srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse(",
"Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object, credentials, exists=True): return cls( srn=json_object['SRN'],",
"resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response =",
"-> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0:",
"i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i) if len(srns_to_fetch) > 0: raise",
"for component_id in components_ids if component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self,",
"Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess,",
"bool: resources = self.get_resources(resource_ids) return all(resource.exists for resource in resources) def get_components_of_type(self, resource_id:",
"component_id for component_id in components_ids if component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type) def",
"return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids) return all(resource.exists",
"DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN)))",
"list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE))",
"** i) if len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self,",
"self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch =",
"str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type)) assert len(get_components_of_type_result) == 1 return get_components_of_type_result[0]",
"MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client def get_resources(self, resource_ids:",
"5 @attr.s(frozen=True) class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool))",
"@attr.s(frozen=True) class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data:",
"exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)),",
"result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN])",
"-> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success,",
"attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object, credentials, exists=True):",
"@attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN]",
"def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists,",
"partial from itertools import islice from typing import List, Optional, Iterable import attr",
"@staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources",
"typing import List, Optional, Iterable import attr from attr.validators import instance_of, optional from",
"check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids) return all(resource.exists for resource in",
"import instance_of, optional from pampy import match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\",
"self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def",
"self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response:",
"return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) ->",
"components_ids_with_requested_type = [ component_id for component_id in components_ids if component_id.type == component_type ]",
"= delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt')",
"= get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch)",
"= attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location:",
"not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns] unprocessed_srn =",
"bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None)",
"List, Optional, Iterable import attr from attr.validators import instance_of, optional from pampy import",
"unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result)",
"def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return",
"raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch =",
"= 5 @attr.s(frozen=True) class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool =",
"osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location from osdu_commons.utils",
"attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object, credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'),",
"not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns))",
"pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client",
"List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch)",
"credentials, exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse:",
"resource in resources) def get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]: resource =",
"match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) ->",
"= [ component_id for component_id in components_ids if component_id.type == component_type ] return",
"credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls,",
"self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for",
"set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from",
"S3Location from osdu_commons.utils import convert from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import list_of",
"len(get_resources_result) == 1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources =",
"exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources:",
"DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod",
") def get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) ==",
"resources) def get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids",
"[ component_id for component_id in components_ids if component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type)",
"from typing import List, Optional, Iterable import attr from attr.validators import instance_of, optional",
"= delivery_client def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch =",
"import logging import time from functools import partial from itertools import islice from",
"convert from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS",
"all(resource.exists for resource in resources) def get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]:",
"attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class",
"range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn",
"yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns:",
"{delivered_response.unprocessed_srn} after {i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i)",
"time.sleep(2 ** i) if len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def",
"srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True):",
"= set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield",
"self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i in",
"partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result credentials",
"attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location]",
"-> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns]",
"not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns =",
"import attr from attr.validators import instance_of, optional from pampy import match from osdu_commons.clients.delivery_client",
"srns_to_fetch = set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources",
"DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids:",
"temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] =",
"not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert",
"cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] =",
"unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids",
"from functools import partial from itertools import islice from typing import List, Optional,",
"after {i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i) if",
"{i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i) if len(srns_to_fetch)",
"= attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials:",
"{srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch)",
"cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials,",
"> 0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse:",
"item: GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True)",
"= attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None)",
"DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client def get_resources(self,",
"credentials) for res_item in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def",
"s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls,",
"Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield",
"srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found,",
"delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed",
"credentials = get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result] return",
"def get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type)) assert",
"attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100",
"exists=exists ) @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn, data=item.data,",
"@classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials,",
"len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt') if i <",
"= list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids,",
"GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result",
"handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn, exists=False)",
"srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result credentials =",
"srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids)",
"-> DeliveredResponse: result = get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials)",
"in components_ids if component_id.type == component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN,",
"from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location from",
") @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn:",
"List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self,",
"self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result =",
"temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object, credentials, exists=True): return cls(",
"srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn",
"Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object,",
"> 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN])",
"= self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch)",
"< MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i) if len(srns_to_fetch) > 0: raise Exception(f'Cannot",
"if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i} attempt') if i",
"GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location from osdu_commons.utils import convert from osdu_commons.utils.srn import",
"def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns = get_resources_response.not_found_resource_ids not_found_delivery_resources = [DeliveredResource(srn=srn,",
"result = get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item",
"attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict]",
"exists=False) for srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[],",
"from osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource:",
"from pampy import match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess, \\ GetResourcesResponseNotFound, GetResourcesResultItem from",
"def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE))",
"Optional, Iterable import attr from attr.validators import instance_of, optional from pampy import match",
"0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch",
"in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn )",
"return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem,",
"for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources",
"= list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id:",
"len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids:",
"DeliveryClient): self._delivery_client = delivery_client def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids)",
"default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object, credentials, exists=True): return",
"list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool:",
"resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item) for",
"self._delivery_client = delivery_client def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch",
"fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response",
"SRN, component_type: str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type)) assert len(get_components_of_type_result) == 1",
"resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch)",
"= attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE = 100 def __init__(self, delivery_client:",
"resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield from",
"import convert from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__)",
"= [DeliveredResource(srn=srn, exists=False) for srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return",
") @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn, data=item.data, s3_location=item.s3_location,",
"delivery_client def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids,",
"list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) def get_resources_batch_unordered(self, resource_ids: List[SRN]) -> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i",
"handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources = [",
"from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS =",
"MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i) if len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch",
"srns_to_fetch = delivered_response.unprocessed_srn if len(srns_to_fetch) == 0: break logger.debug(f'Unprocessed srns: {delivered_response.unprocessed_srn} after {i}",
"osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource: srn:",
"in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for component_id in components_ids if component_id.type ==",
") @staticmethod def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result credentials = get_resources_response.temporary_credentials",
"attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i) if len(srns_to_fetch) >",
"delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id]))",
"Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type =",
"set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN) -> DeliveredResource:",
"get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN])",
"MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool",
"= self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id",
"def get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]: resource = self.get_resource(resource_id) components_ids =",
"exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls, item:",
"if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 ** i) if len(srns_to_fetch) > 0:",
"return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id,",
"not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def",
"list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources, unprocessed_srn=unprocessed_srn ) def get_resource(self, resource_id: SRN)",
"== component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource:",
"1: time.sleep(2 ** i) if len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}')",
"DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound,",
"= [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for component_id in",
"get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return",
"def get_resource(self, resource_id: SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) == 1",
"list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), )",
"delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class",
"SRN) -> DeliveredResource: get_resources_result = list(self.get_resources([resource_id])) assert len(get_resources_result) == 1 return get_resources_result[0] def",
"exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource)))",
"get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN]) -> DeliveredResponse: srns_to_fetch = list(resource_ids) get_resources_response = self._delivery_client.get_resources(srns_to_fetch) return match(",
"= self._delivery_client.get_resources(srns_to_fetch) return match( get_resources_response, GetResourcesResponseSuccess, self.handle_get_resources_success, GetResourcesResponseNotFound, partial(self.handle_get_resources_not_found, srn_to_fetch=srns_to_fetch), ) @staticmethod def",
"@classmethod def from_json(cls, json_object, credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists",
"islice from typing import List, Optional, Iterable import attr from attr.validators import instance_of,",
"__init__(self, delivery_client: DeliveryClient): self._delivery_client = delivery_client def get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids",
"[ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result] return DeliveredResponse( delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn )",
"component_type ] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource: get_components_of_type_result",
"1 return get_resources_result[0] def check_if_resources_exist(self, resource_ids: Iterable[SRN]) -> bool: resources = self.get_resources(resource_ids) return",
"for resource in resources) def get_components_of_type(self, resource_id: SRN, component_type: str) -> Iterable[DeliveredResource]: resource",
"\\ GetResourcesResponseNotFound, GetResourcesResultItem from osdu_commons.model.aws import S3Location from osdu_commons.utils import convert from osdu_commons.utils.srn",
"self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource: get_components_of_type_result = list(self.get_components_of_type(resource_id, component_type))",
"iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) > 0: yield from self.get_resources_batch_unordered(srns_to_fetch) srns_to_fetch",
"resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [ component_id for component_id in components_ids if component_id.type == component_type",
"def handle_get_resources_success(get_resources_response: GetResourcesResponseSuccess) -> DeliveredResponse: result = get_resources_response.result credentials = get_resources_response.temporary_credentials delivery_resources =",
"Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while len(srns_to_fetch) >",
"default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def",
"= get_resources_response.temporary_credentials delivery_resources = [ DeliveredResource.from_get_resource_result_item(res_item, credentials) for res_item in result] return DeliveredResponse(",
"- 1: time.sleep(2 ** i) if len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch srns:",
"class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict]",
"= logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn)",
"DeliveredResource: srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] =",
"time from functools import partial from itertools import islice from typing import List,",
"srns: {delivered_response.unprocessed_srn} after {i} attempt') if i < MAX_RESOURCES_FETCHING_ATTEMPTS - 1: time.sleep(2 **",
"def from_json(cls, json_object, credentials, exists=True): return cls( srn=json_object['SRN'], data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists )",
"= attr.ib(validator=list_of(instance_of(DeliveredResource))) unprocessed_srn: List[SRN] = attr.ib(validator=list_of(instance_of(SRN))) class DeliveryServiceException(object): pass class DeliveryService: MAX_GET_RESOURCES_BATCH_SIZE =",
"delivery_resources=delivery_resources, not_found_resources=[], unprocessed_srn=get_resources_response.unprocessed_srn ) @staticmethod def handle_get_resources_not_found(get_resources_response: GetResourcesResponseNotFound, srn_to_fetch: List[SRN]) -> DeliveredResponse: not_found_srns",
"srn: SRN = attr.ib(validator=instance_of(SRN), converter=convert.srn) exists: bool = attr.ib(validator=instance_of(bool)) data: Optional[dict] = attr.ib(validator=optional(instance_of(dict)),",
"resource = self.get_resource(resource_id) components_ids = [SRN.from_string(item) for item in resource.data['GroupTypeProperties']['Components']] components_ids_with_requested_type = [",
"-> bool: resources = self.get_resources(resource_ids) return all(resource.exists for resource in resources) def get_components_of_type(self,",
"data=json_object.get('Data'), s3_location=json_object.get('S3Location'), temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return",
"from attr.validators import instance_of, optional from pampy import match from osdu_commons.clients.delivery_client import DeliveryClient,",
"Iterable import attr from attr.validators import instance_of, optional from pampy import match from",
"get_resources(self, resource_ids: Iterable[SRN]) -> Iterable[DeliveredResource]: resource_ids = iter(resource_ids) srns_to_fetch = list(islice(resource_ids, self.MAX_GET_RESOURCES_BATCH_SIZE)) while",
"itertools import islice from typing import List, Optional, Iterable import attr from attr.validators",
"if len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids: Iterable[SRN])",
"i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from delivered_response.delivery_resources yield from delivered_response.not_found_resources srns_to_fetch",
"temporary_credentials=credentials, exists=exists ) @classmethod def from_get_resource_result_item(cls, item: GetResourcesResultItem, credentials, exists=True): return cls( srn=item.srn,",
"import S3Location from osdu_commons.utils import convert from osdu_commons.utils.srn import SRN from osdu_commons.utils.validators import",
"attr.ib(validator=optional(instance_of(dict)), default=None) s3_location: Optional[S3Location] = attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod",
"import List, Optional, Iterable import attr from attr.validators import instance_of, optional from pampy",
"SRN from osdu_commons.utils.validators import list_of logger = logging.getLogger(__name__) MAX_RESOURCES_FETCHING_ATTEMPTS = 5 @attr.s(frozen=True) class",
"data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource))) not_found_resources:",
"for srn in not_found_srns] unprocessed_srn = list(set(srn_to_fetch) - set(not_found_srns)) return DeliveredResponse( delivery_resources=[], not_found_resources=not_found_delivery_resources,",
"i) if len(srns_to_fetch) > 0: raise Exception(f'Cannot fetch srns: {srns_to_fetch}') def get_resources_batch_unordered_response(self, resource_ids:",
"attr.validators import instance_of, optional from pampy import match from osdu_commons.clients.delivery_client import DeliveryClient, GetResourcesResponseSuccess,",
"srn=item.srn, data=item.data, s3_location=item.s3_location, temporary_credentials=credentials, exists=exists, ) @attr.s(frozen=True) class DeliveredResponse: delivery_resources: List[DeliveredResource] = attr.ib(validator=list_of(instance_of(DeliveredResource)))",
"] return self.get_resources(components_ids_with_requested_type) def get_component_of_type(self, resource_id: SRN, component_type: str) -> DeliveredResource: get_components_of_type_result =",
"Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield from",
"= attr.ib(validator=optional(instance_of(S3Location)), default=None) temporary_credentials: Optional[dict] = attr.ib(validator=optional(instance_of(dict)), default=None) @classmethod def from_json(cls, json_object, credentials,",
"-> Iterable[DeliveredResource]: srns_to_fetch = set(resource_ids) for i in range(MAX_RESOURCES_FETCHING_ATTEMPTS): delivered_response = self.get_resources_batch_unordered_response(srns_to_fetch) yield"
] |
[
"from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class",
"_OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING:",
"} def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self) -> Type[AbstractMessage]: return",
"bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message",
"from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from",
"from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from",
"import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import",
"OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY:",
"from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from",
"import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import",
"PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage,",
"import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import",
"PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage",
"VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK:",
"bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message",
"from bxcommon.messages.abstract_message import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from",
"GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory,",
"OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage }",
"AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage,",
"OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS:",
"bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory):",
"<filename>src/bxgateway/messages/ont/ont_message_factory.py from typing import Type from bxcommon.messages.abstract_message import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory",
"from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from",
"self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self) -> Type[AbstractMessage]: return OntMessage ont_message_factory = _OntMessageFactory()",
"from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from",
"InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage,",
"bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type",
"import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import",
"def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self) -> Type[AbstractMessage]: return OntMessage",
"OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def",
"OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS:",
"from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = {",
"OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage",
"bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message",
"OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK:",
"GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage",
"import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import",
"OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage",
"bxcommon.messages.abstract_message import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message",
"= { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG:",
"BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage",
"class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage,",
"OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND:",
"import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage,",
"import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import",
"NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self) -> Type[AbstractMessage]:",
"AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage",
"bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message",
"import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import",
"OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS:",
"OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS:",
"from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from",
"VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage,",
"bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message",
"from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from",
"VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS:",
"import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import",
"OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self) ->",
"OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA:",
"from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from",
"PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage",
"bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS:",
"import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import",
"import Type from bxcommon.messages.abstract_message import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import",
"GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage",
"from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from",
"from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from",
"from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from",
"bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message",
"OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self):",
"OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage",
"from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from",
"import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import",
"from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from",
"bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message",
"AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage",
"OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage,",
"{ OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage,",
"bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message",
"import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage,",
"GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def",
"import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import",
"OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping =",
"OntMessageType.BLOCK: BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__()",
"GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage,",
"import TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING",
"OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS:",
"BlockOntMessage, OntMessageType.HEADERS: HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping",
"TxOntMessage from bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING =",
"import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import",
"GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage",
"import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import",
"PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage, OntMessageType.GET_DATA: GetDataOntMessage, OntMessageType.GET_HEADERS: GetHeadersOntMessage, OntMessageType.GET_BLOCKS: GetBlocksOntMessage, OntMessageType.BLOCK: BlockOntMessage,",
"bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message",
"__init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self) -> Type[AbstractMessage]: return OntMessage ont_message_factory",
"bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message",
"bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message",
"InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType",
"from typing import Type from bxcommon.messages.abstract_message import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from",
"from bxgateway.messages.ont.ping_ont_message import PingOntMessage from bxgateway.messages.ont.pong_ont_message import PongOntMessage from bxgateway.messages.ont.tx_ont_message import TxOntMessage from",
"import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import",
"GetAddrOntMessage from bxgateway.messages.ont.get_blocks_ont_message import GetBlocksOntMessage from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage",
"import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import",
"super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self) -> Type[AbstractMessage]: return OntMessage ont_message_factory =",
"bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage from bxgateway.messages.ont.get_headers_ont_message import GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message",
"GetHeadersOntMessage from bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage",
"HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage",
"HeadersOntMessage, OntMessageType.TRANSACTIONS: TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING",
"typing import Type from bxcommon.messages.abstract_message import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message",
"from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from",
"VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage, OntMessageType.PONG: PongOntMessage, OntMessageType.CONSENSUS: OntConsensusMessage, OntMessageType.INVENTORY: InvOntMessage,",
"bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message",
"bxgateway.messages.ont.headers_ont_message import HeadersOntMessage from bxgateway.messages.ont.inventory_ont_message import InvOntMessage from bxgateway.messages.ont.notfound_ont_message import NotFoundOntMessage from bxgateway.messages.ont.ont_message",
"from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage,",
"Type from bxcommon.messages.abstract_message import AbstractMessage from bxcommon.messages.abstract_message_factory import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage",
"TxOntMessage, OntMessageType.NOT_FOUND: NotFoundOntMessage } def __init__(self): super(_OntMessageFactory, self).__init__() self.message_type_mapping = self._MESSAGE_TYPE_MAPPING def get_base_message_type(self)",
"_MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION: VersionOntMessage, OntMessageType.VERACK: VerAckOntMessage, OntMessageType.GET_ADDRESS: GetAddrOntMessage, OntMessageType.ADDRESS: AddrOntMessage, OntMessageType.PING: PingOntMessage,",
"AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import OntConsensusMessage from bxgateway.messages.ont.get_addr_ont_message import GetAddrOntMessage",
"import AbstractMessageFactory from bxgateway.messages.ont.addr_ont_message import AddrOntMessage from bxgateway.messages.ont.block_ont_message import BlockOntMessage from bxgateway.messages.ont.consensus_ont_message import",
"NotFoundOntMessage from bxgateway.messages.ont.ont_message import OntMessage from bxgateway.messages.ont.ont_message_type import OntMessageType from bxgateway.messages.ont.ping_ont_message import PingOntMessage",
"bxgateway.messages.ont.ver_ack_ont_message import VerAckOntMessage from bxgateway.messages.ont.version_ont_message import VersionOntMessage class _OntMessageFactory(AbstractMessageFactory): _MESSAGE_TYPE_MAPPING = { OntMessageType.VERSION:"
] |
[
"self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel >= 128: x = self.model.layer2(x) if self.out_channel",
"pretrained = self.args[1] if len(self.args) > 1 else True return self._get_module( Resnet18(self.in_channel, self.out_channel,",
"== 64: del self.model.layer4 del self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel: 512, 256,",
"self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel >= 128: x =",
"self.model.layer3(x) if self.out_channel >= 512: x = self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\"",
"return self.args[0] def __call__(self, repeat: int = 1): # TODO: Apply repeat pretrained",
"x = self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x) x",
"int: \"\"\"Get out channel size.\"\"\" return self.args[0] def __call__(self, repeat: int = 1):",
"\"\"\" Args: in_channel: input channels. out_channel: output channels. \"\"\" super().__init__() self.out_channel = out_channel",
"= self.model.layer3(x) if self.out_channel >= 512: x = self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract):",
"import models class Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel: int, pretrained: bool): \"\"\"",
"self.out_channel == 512: pass elif self.out_channel == 256: del self.model.layer4 elif self.out_channel ==",
"= 1): # TODO: Apply repeat pretrained = self.args[1] if len(self.args) > 1",
"out_channel(self) -> int: \"\"\"Get out channel size.\"\"\" return self.args[0] def __call__(self, repeat: int",
"self.model.layer3 elif self.out_channel == 64: del self.model.layer4 del self.model.layer3 del self.model.layer2 else: raise",
"128: x = self.model.layer2(x) if self.out_channel >= 256: x = self.model.layer3(x) if self.out_channel",
"= self.model.layer2(x) if self.out_channel >= 256: x = self.model.layer3(x) if self.out_channel >= 512:",
"or 64\") def forward(self,x): x = self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x)",
"= out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if self.out_channel == 512:",
"256: del self.model.layer4 elif self.out_channel == 128: del self.model.layer4 del self.model.layer3 elif self.out_channel",
"self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator for parsing.\"\"\" def",
"128 or 64\") def forward(self,x): x = self.model.conv1(x) x = self.model.bn1(x) x =",
"**kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self) -> int: \"\"\"Get out channel size.\"\"\"",
"\"\"\" super().__init__() self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if",
"self.args[0] def __call__(self, repeat: int = 1): # TODO: Apply repeat pretrained =",
"__init__(self, in_channel: int, out_channel: int, pretrained: bool): \"\"\" Args: in_channel: input channels. out_channel:",
"out_channel: int, pretrained: bool): \"\"\" Args: in_channel: input channels. out_channel: output channels. \"\"\"",
"64\") def forward(self,x): x = self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x) x",
"x = self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator for",
"del self.model.layer4 elif self.out_channel == 128: del self.model.layer4 del self.model.layer3 elif self.out_channel ==",
"512: x = self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator",
"\"\"\"Get out channel size.\"\"\" return self.args[0] def __call__(self, repeat: int = 1): #",
"x = self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x) if",
"self.out_channel == 128: del self.model.layer4 del self.model.layer3 elif self.out_channel == 64: del self.model.layer4",
"self.model.layer1(x) if self.out_channel >= 128: x = self.model.layer2(x) if self.out_channel >= 256: x",
"raise Exception(\"out_channel: 512, 256, 128 or 64\") def forward(self,x): x = self.model.conv1(x) x",
"elif self.out_channel == 128: del self.model.layer4 del self.model.layer3 elif self.out_channel == 64: del",
"self.model.layer2(x) if self.out_channel >= 256: x = self.model.layer3(x) if self.out_channel >= 512: x",
"super().__init__() self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if self.out_channel",
"def __call__(self, repeat: int = 1): # TODO: Apply repeat pretrained = self.args[1]",
"del self.model.layer4 del self.model.layer3 elif self.out_channel == 64: del self.model.layer4 del self.model.layer3 del",
"import torch from torch import nn as nn from src.modules.base_generator import GeneratorAbstract from",
"== 512: pass elif self.out_channel == 256: del self.model.layer4 elif self.out_channel == 128:",
"= self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel",
"from src.modules.base_generator import GeneratorAbstract from torchvision import models class Resnet18(nn.Module): def __init__(self, in_channel:",
"256: x = self.model.layer3(x) if self.out_channel >= 512: x = self.model.layer4(x) return x",
"-> int: \"\"\"Get out channel size.\"\"\" return self.args[0] def __call__(self, repeat: int =",
"512: pass elif self.out_channel == 256: del self.model.layer4 elif self.out_channel == 128: del",
"self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x)",
"self.model.layer4 del self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel: 512, 256, 128 or 64\")",
"= self.model.layer1(x) if self.out_channel >= 128: x = self.model.layer2(x) if self.out_channel >= 256:",
"del self.model.layer4 del self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel: 512, 256, 128 or",
"nn from src.modules.base_generator import GeneratorAbstract from torchvision import models class Resnet18(nn.Module): def __init__(self,",
"self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel >=",
"self.out_channel >= 512: x = self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models)",
"self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel: 512, 256, 128 or 64\") def forward(self,x):",
"self.out_channel == 256: del self.model.layer4 elif self.out_channel == 128: del self.model.layer4 del self.model.layer3",
"if self.out_channel >= 512: x = self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18",
"<gh_stars>1-10 import torch from torch import nn as nn from src.modules.base_generator import GeneratorAbstract",
"generator for parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self)",
"elif self.out_channel == 64: del self.model.layer4 del self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel:",
"self.args[1] if len(self.args) > 1 else True return self._get_module( Resnet18(self.in_channel, self.out_channel, pretrained=pretrained) )",
"Args: in_channel: input channels. out_channel: output channels. \"\"\" super().__init__() self.out_channel = out_channel self.model",
"super().__init__(*args, **kwargs) @property def out_channel(self) -> int: \"\"\"Get out channel size.\"\"\" return self.args[0]",
"nn as nn from src.modules.base_generator import GeneratorAbstract from torchvision import models class Resnet18(nn.Module):",
"def out_channel(self) -> int: \"\"\"Get out channel size.\"\"\" return self.args[0] def __call__(self, repeat:",
"elif self.out_channel == 256: del self.model.layer4 elif self.out_channel == 128: del self.model.layer4 del",
"256, 128 or 64\") def forward(self,x): x = self.model.conv1(x) x = self.model.bn1(x) x",
"in_channel: int, out_channel: int, pretrained: bool): \"\"\" Args: in_channel: input channels. out_channel: output",
"Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator for parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\"",
"from torchvision import models class Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel: int, pretrained:",
"out_channel: output channels. \"\"\" super().__init__() self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc",
"__init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self) -> int: \"\"\"Get out",
"GeneratorAbstract from torchvision import models class Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel: int,",
"int, out_channel: int, pretrained: bool): \"\"\" Args: in_channel: input channels. out_channel: output channels.",
"int, pretrained: bool): \"\"\" Args: in_channel: input channels. out_channel: output channels. \"\"\" super().__init__()",
"\"\"\" Resnet18 (torchvision.models) module generator for parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args,",
"from torch import nn as nn from src.modules.base_generator import GeneratorAbstract from torchvision import",
"def __init__(self, in_channel: int, out_channel: int, pretrained: bool): \"\"\" Args: in_channel: input channels.",
"for parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self) ->",
"del self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel: 512, 256, 128 or 64\") def",
"if self.out_channel == 512: pass elif self.out_channel == 256: del self.model.layer4 elif self.out_channel",
"torch from torch import nn as nn from src.modules.base_generator import GeneratorAbstract from torchvision",
"parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self) -> int:",
"512, 256, 128 or 64\") def forward(self,x): x = self.model.conv1(x) x = self.model.bn1(x)",
"return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator for parsing.\"\"\" def __init__(self,",
"= self.args[1] if len(self.args) > 1 else True return self._get_module( Resnet18(self.in_channel, self.out_channel, pretrained=pretrained)",
"module generator for parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def",
"self.model.layer2 else: raise Exception(\"out_channel: 512, 256, 128 or 64\") def forward(self,x): x =",
"pass elif self.out_channel == 256: del self.model.layer4 elif self.out_channel == 128: del self.model.layer4",
"x = self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel >= 128: x = self.model.layer2(x)",
"else: raise Exception(\"out_channel: 512, 256, 128 or 64\") def forward(self,x): x = self.model.conv1(x)",
"output channels. \"\"\" super().__init__() self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc del",
"import GeneratorAbstract from torchvision import models class Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel:",
"1): # TODO: Apply repeat pretrained = self.args[1] if len(self.args) > 1 else",
">= 256: x = self.model.layer3(x) if self.out_channel >= 512: x = self.model.layer4(x) return",
">= 128: x = self.model.layer2(x) if self.out_channel >= 256: x = self.model.layer3(x) if",
"Exception(\"out_channel: 512, 256, 128 or 64\") def forward(self,x): x = self.model.conv1(x) x =",
"import nn as nn from src.modules.base_generator import GeneratorAbstract from torchvision import models class",
"if self.out_channel >= 256: x = self.model.layer3(x) if self.out_channel >= 512: x =",
"bool): \"\"\" Args: in_channel: input channels. out_channel: output channels. \"\"\" super().__init__() self.out_channel =",
"128: del self.model.layer4 del self.model.layer3 elif self.out_channel == 64: del self.model.layer4 del self.model.layer3",
">= 512: x = self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module",
"self.out_channel >= 256: x = self.model.layer3(x) if self.out_channel >= 512: x = self.model.layer4(x)",
"(torchvision.models) module generator for parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property",
"repeat: int = 1): # TODO: Apply repeat pretrained = self.args[1] if len(self.args)",
"# TODO: Apply repeat pretrained = self.args[1] if len(self.args) > 1 else True",
"del self.model.layer2 else: raise Exception(\"out_channel: 512, 256, 128 or 64\") def forward(self,x): x",
"= self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel >= 128: x = self.model.layer2(x) if",
"out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if self.out_channel == 512: pass",
"Apply repeat pretrained = self.args[1] if len(self.args) > 1 else True return self._get_module(",
"torchvision import models class Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel: int, pretrained: bool):",
"as nn from src.modules.base_generator import GeneratorAbstract from torchvision import models class Resnet18(nn.Module): def",
"channel size.\"\"\" return self.args[0] def __call__(self, repeat: int = 1): # TODO: Apply",
"x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator for parsing.\"\"\" def __init__(self, *args,",
"x = self.model.layer2(x) if self.out_channel >= 256: x = self.model.layer3(x) if self.out_channel >=",
"64: del self.model.layer4 del self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel: 512, 256, 128",
"__call__(self, repeat: int = 1): # TODO: Apply repeat pretrained = self.args[1] if",
"x = self.model.layer3(x) if self.out_channel >= 512: x = self.model.layer4(x) return x class",
"self.model.layer4 elif self.out_channel == 128: del self.model.layer4 del self.model.layer3 elif self.out_channel == 64:",
"repeat pretrained = self.args[1] if len(self.args) > 1 else True return self._get_module( Resnet18(self.in_channel,",
"in_channel: input channels. out_channel: output channels. \"\"\" super().__init__() self.out_channel = out_channel self.model =",
"x = self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel >= 128:",
"Resnet18 (torchvision.models) module generator for parsing.\"\"\" def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs)",
"forward(self,x): x = self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x)",
"self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if self.out_channel ==",
"pretrained: bool): \"\"\" Args: in_channel: input channels. out_channel: output channels. \"\"\" super().__init__() self.out_channel",
"channels. \"\"\" super().__init__() self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool",
"del self.model.avgpool if self.out_channel == 512: pass elif self.out_channel == 256: del self.model.layer4",
"= self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x) x = self.model.maxpool(x) x =",
"models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if self.out_channel == 512: pass elif self.out_channel ==",
"self.model.fc del self.model.avgpool if self.out_channel == 512: pass elif self.out_channel == 256: del",
"models class Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel: int, pretrained: bool): \"\"\" Args:",
"def __init__(self, *args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self) -> int: \"\"\"Get",
"torch import nn as nn from src.modules.base_generator import GeneratorAbstract from torchvision import models",
"size.\"\"\" return self.args[0] def __call__(self, repeat: int = 1): # TODO: Apply repeat",
"self.out_channel == 64: del self.model.layer4 del self.model.layer3 del self.model.layer2 else: raise Exception(\"out_channel: 512,",
"del self.model.layer3 elif self.out_channel == 64: del self.model.layer4 del self.model.layer3 del self.model.layer2 else:",
"= self.model.layer4(x) return x class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator for parsing.\"\"\"",
"self.model.avgpool if self.out_channel == 512: pass elif self.out_channel == 256: del self.model.layer4 elif",
"self.model.layer4 del self.model.layer3 elif self.out_channel == 64: del self.model.layer4 del self.model.layer3 del self.model.layer2",
"self.out_channel >= 128: x = self.model.layer2(x) if self.out_channel >= 256: x = self.model.layer3(x)",
"if self.out_channel >= 128: x = self.model.layer2(x) if self.out_channel >= 256: x =",
"= self.model.relu(x) x = self.model.maxpool(x) x = self.model.layer1(x) if self.out_channel >= 128: x",
"== 128: del self.model.layer4 del self.model.layer3 elif self.out_channel == 64: del self.model.layer4 del",
"self.model = models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if self.out_channel == 512: pass elif",
"channels. out_channel: output channels. \"\"\" super().__init__() self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained) del",
"== 256: del self.model.layer4 elif self.out_channel == 128: del self.model.layer4 del self.model.layer3 elif",
"Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel: int, pretrained: bool): \"\"\" Args: in_channel: input",
"= models.resnet18(pretrained=pretrained) del self.model.fc del self.model.avgpool if self.out_channel == 512: pass elif self.out_channel",
"def forward(self,x): x = self.model.conv1(x) x = self.model.bn1(x) x = self.model.relu(x) x =",
"*args, **kwargs): \"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self) -> int: \"\"\"Get out channel",
"\"\"\"Initailize.\"\"\" super().__init__(*args, **kwargs) @property def out_channel(self) -> int: \"\"\"Get out channel size.\"\"\" return",
"TODO: Apply repeat pretrained = self.args[1] if len(self.args) > 1 else True return",
"src.modules.base_generator import GeneratorAbstract from torchvision import models class Resnet18(nn.Module): def __init__(self, in_channel: int,",
"del self.model.fc del self.model.avgpool if self.out_channel == 512: pass elif self.out_channel == 256:",
"x = self.model.layer1(x) if self.out_channel >= 128: x = self.model.layer2(x) if self.out_channel >=",
"int = 1): # TODO: Apply repeat pretrained = self.args[1] if len(self.args) >",
"class Resnet18Generator(GeneratorAbstract): \"\"\" Resnet18 (torchvision.models) module generator for parsing.\"\"\" def __init__(self, *args, **kwargs):",
"class Resnet18(nn.Module): def __init__(self, in_channel: int, out_channel: int, pretrained: bool): \"\"\" Args: in_channel:",
"input channels. out_channel: output channels. \"\"\" super().__init__() self.out_channel = out_channel self.model = models.resnet18(pretrained=pretrained)",
"@property def out_channel(self) -> int: \"\"\"Get out channel size.\"\"\" return self.args[0] def __call__(self,",
"**kwargs) @property def out_channel(self) -> int: \"\"\"Get out channel size.\"\"\" return self.args[0] def",
"out channel size.\"\"\" return self.args[0] def __call__(self, repeat: int = 1): # TODO:"
] |
[
"typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from adorn.unit.complex import _UnitT",
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"to perform some action. Examples of constructs that utilize the information contained in",
"getattr(self.cls, \"__args__\", None) def __eq__(self, o: object) -> bool: # noqa: C901 #",
"specific language governing permissions and # limitations under the License. \"\"\"State from the",
"of constructs that utilize the information contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` -",
":class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type of the underlying parameter parent (_UnitT): The",
"in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the",
"we force the check # prevent circular import from adorn.unit.parameter_value import Dependent if",
"str(self.cls) == str(o.cls) else: eq_cls = self.cls == o.cls return eq_cls def __str__(self):",
"outside of its ``obj``, to be utilized, to perform some action. Examples of",
"this file except in compliance with the License. # You may obtain a",
"given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside of its ``obj``, to be utilized,",
"not normal_args: return False eq_cls = False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls",
"<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"ANY KIND, either express or implied. # See the License for the specific",
"parameter in the constructor \"\"\" def __init__( self, cls: \"_UnitT\", parent: \"_UnitT\", local_state:",
"parameter_name: str, ): self.cls = cls self.parent = parent self.local_state = local_state self.parameter_name",
":class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type of the underlying parameter parent",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"local_state self.parameter_name = parameter_name self.origin = getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls, \"__args__\",",
"): self.cls = cls self.parent = parent self.local_state = local_state self.parameter_name = parameter_name",
"\"_UnitT\", local_state: Dict[str, Any], parameter_name: str, ): self.cls = cls self.parent = parent",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"local_state: Dict[str, Any], parameter_name: str, ): self.cls = cls self.parent = parent self.local_state",
"of object whose constructor requires an object of type ``cls`` local_state (Dict[str, Any]):",
"OF ANY KIND, either express or implied. # See the License for the",
"\"__args__\", None) def __eq__(self, o: object) -> bool: # noqa: C901 # Literal.__eq__",
"the constructor \"\"\" def __init__( self, cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any],",
"# limitations under the License. \"\"\"State from the constructor for a parameter from",
"from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from adorn.unit.complex import",
") if not normal_args: return False eq_cls = False if issubclass(getattr(self.cls, \"__origin__\", int),",
"bool: # noqa: C901 # Literal.__eq__ doesn't support dict's so # we force",
"adorn.unit.complex import _UnitT class Parameter: \"\"\"State for a parameter from a given :class:`~adorn.data.constructor.Constructor`",
"``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type",
"adorn.unit.parameter_value import Dependent if not isinstance(o, Parameter): return False normal_args = all( i",
"(_UnitT): the type of the underlying parameter parent (_UnitT): The type of object",
"_UnitT class Parameter: \"\"\"State for a parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows",
"object of type ``cls`` local_state (Dict[str, Any]): Information about other arguments provided to",
"normal_args: return False eq_cls = False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls =",
"return False eq_cls = False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls = str(self.cls)",
"provided to the given constructor parameter_name (str): name of the parameter in the",
"parameter parent (_UnitT): The type of object whose constructor requires an object of",
"not isinstance(o, Parameter): return False normal_args = all( i for i in [",
"[ self.parent == o.parent, self.local_state == o.local_state, self.parameter_name == o.parameter_name, ] ) if",
"] ) if not normal_args: return False eq_cls = False if issubclass(getattr(self.cls, \"__origin__\",",
"= getattr(self.cls, \"__args__\", None) def __eq__(self, o: object) -> bool: # noqa: C901",
"limitations under the License. \"\"\"State from the constructor for a parameter from the",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"Any], parameter_name: str, ): self.cls = cls self.parent = parent self.local_state = local_state",
"False normal_args = all( i for i in [ self.parent == o.parent, self.local_state",
"Dict[str, Any], parameter_name: str, ): self.cls = cls self.parent = parent self.local_state =",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"if not normal_args: return False eq_cls = False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent):",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"from adorn.unit.parameter_value import Dependent if not isinstance(o, Parameter): return False normal_args = all(",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
":class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside of its ``obj``, to be utilized, to",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"from adorn.unit.complex import _UnitT class Parameter: \"\"\"State for a parameter from a given",
"utilized, to perform some action. Examples of constructs that utilize the information contained",
"o.parent, self.local_state == o.local_state, self.parameter_name == o.parameter_name, ] ) if not normal_args: return",
"if TYPE_CHECKING: # pragma: no cover from adorn.unit.complex import _UnitT class Parameter: \"\"\"State",
"type of object whose constructor requires an object of type ``cls`` local_state (Dict[str,",
"required by applicable law or agreed to in writing, software # distributed under",
"TYPE_CHECKING: # pragma: no cover from adorn.unit.complex import _UnitT class Parameter: \"\"\"State for",
"cover from adorn.unit.complex import _UnitT class Parameter: \"\"\"State for a parameter from a",
"applicable law or agreed to in writing, software # distributed under the License",
"Any]): Information about other arguments provided to the given constructor parameter_name (str): name",
"and # limitations under the License. \"\"\"State from the constructor for a parameter",
"governing permissions and # limitations under the License. \"\"\"State from the constructor for",
"the parameter in the constructor \"\"\" def __init__( self, cls: \"_UnitT\", parent: \"_UnitT\",",
"= cls self.parent = parent self.local_state = local_state self.parameter_name = parameter_name self.origin =",
"== o.local_state, self.parameter_name == o.parameter_name, ] ) if not normal_args: return False eq_cls",
"permissions and # limitations under the License. \"\"\"State from the constructor for a",
"or agreed to in writing, software # distributed under the License is distributed",
"underlying parameter parent (_UnitT): The type of object whose constructor requires an object",
"str(o.cls) else: eq_cls = self.cls == o.cls return eq_cls def __str__(self): return str(self.cls)",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"prevent circular import from adorn.unit.parameter_value import Dependent if not isinstance(o, Parameter): return False",
"to be utilized, to perform some action. Examples of constructs that utilize the",
"of the parameter in the constructor \"\"\" def __init__( self, cls: \"_UnitT\", parent:",
"\"\"\" def __init__( self, cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name: str,",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"name of the parameter in the constructor \"\"\" def __init__( self, cls: \"_UnitT\",",
"cls self.parent = parent self.local_state = local_state self.parameter_name = parameter_name self.origin = getattr(self.cls,",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"License. # You may obtain a copy of the License at # #",
"cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name: str, ): self.cls = cls",
"compliance with the License. # You may obtain a copy of the License",
"whose constructor requires an object of type ``cls`` local_state (Dict[str, Any]): Information about",
"None) def __eq__(self, o: object) -> bool: # noqa: C901 # Literal.__eq__ doesn't",
"= parameter_name self.origin = getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls, \"__args__\", None) def",
"other arguments provided to the given constructor parameter_name (str): name of the parameter",
"include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type of",
"return False normal_args = all( i for i in [ self.parent == o.parent,",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"\"__origin__\", None) self.args = getattr(self.cls, \"__args__\", None) def __eq__(self, o: object) -> bool:",
"C901 # Literal.__eq__ doesn't support dict's so # we force the check #",
"all( i for i in [ self.parent == o.parent, self.local_state == o.local_state, self.parameter_name",
"normal_args = all( i for i in [ self.parent == o.parent, self.local_state ==",
"== o.parent, self.local_state == o.local_state, self.parameter_name == o.parameter_name, ] ) if not normal_args:",
"contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT):",
"noqa: C901 # Literal.__eq__ doesn't support dict's so # we force the check",
"``Parameter`` allows for information outside of its ``obj``, to be utilized, to perform",
"not use this file except in compliance with the License. # You may",
"parameter_name self.origin = getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls, \"__args__\", None) def __eq__(self,",
"in the constructor \"\"\" def __init__( self, cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str,",
"typing import Any from typing import Dict from typing import TYPE_CHECKING if TYPE_CHECKING:",
"a parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside of its",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls, \"__args__\", None) def __eq__(self, o: object) ->",
"of its ``obj``, to be utilized, to perform some action. Examples of constructs",
"under the License. \"\"\"State from the constructor for a parameter from the constructor.\"\"\"",
"def __eq__(self, o: object) -> bool: # noqa: C901 # Literal.__eq__ doesn't support",
"i in [ self.parent == o.parent, self.local_state == o.local_state, self.parameter_name == o.parameter_name, ]",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"check # prevent circular import from adorn.unit.parameter_value import Dependent if not isinstance(o, Parameter):",
"# you may not use this file except in compliance with the License.",
"agreed to in writing, software # distributed under the License is distributed on",
"Parameter: \"\"\"State for a parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information",
"cls (_UnitT): the type of the underlying parameter parent (_UnitT): The type of",
"= str(self.cls) == str(o.cls) else: eq_cls = self.cls == o.cls return eq_cls def",
"(the \"License\"); # you may not use this file except in compliance with",
"None) self.args = getattr(self.cls, \"__args__\", None) def __eq__(self, o: object) -> bool: #",
"Parameter): return False normal_args = all( i for i in [ self.parent ==",
"parameter_name (str): name of the parameter in the constructor \"\"\" def __init__( self,",
"some action. Examples of constructs that utilize the information contained in ``Parameter`` include:",
"parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name: str, ): self.cls = cls self.parent =",
"# Unless required by applicable law or agreed to in writing, software #",
"by applicable law or agreed to in writing, software # distributed under the",
"circular import from adorn.unit.parameter_value import Dependent if not isinstance(o, Parameter): return False normal_args",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"class Parameter: \"\"\"State for a parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for",
"= False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls = str(self.cls) == str(o.cls) else:",
"arguments provided to the given constructor parameter_name (str): name of the parameter in",
"License. \"\"\"State from the constructor for a parameter from the constructor.\"\"\" from typing",
"file except in compliance with the License. # You may obtain a copy",
"object whose constructor requires an object of type ``cls`` local_state (Dict[str, Any]): Information",
"``cls`` local_state (Dict[str, Any]): Information about other arguments provided to the given constructor",
"for information outside of its ``obj``, to be utilized, to perform some action.",
"License for the specific language governing permissions and # limitations under the License.",
"support dict's so # we force the check # prevent circular import from",
"== o.parameter_name, ] ) if not normal_args: return False eq_cls = False if",
"to in writing, software # distributed under the License is distributed on an",
"implied. # See the License for the specific language governing permissions and #",
"\"License\"); # you may not use this file except in compliance with the",
"import Dependent if not isinstance(o, Parameter): return False normal_args = all( i for",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"of the underlying parameter parent (_UnitT): The type of object whose constructor requires",
"dict's so # we force the check # prevent circular import from adorn.unit.parameter_value",
"self.local_state = local_state self.parameter_name = parameter_name self.origin = getattr(self.cls, \"__origin__\", None) self.args =",
"or implied. # See the License for the specific language governing permissions and",
"the information contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes:",
"no cover from adorn.unit.complex import _UnitT class Parameter: \"\"\"State for a parameter from",
"constructor.\"\"\" from typing import Any from typing import Dict from typing import TYPE_CHECKING",
"action. Examples of constructs that utilize the information contained in ``Parameter`` include: -",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"self, cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name: str, ): self.cls =",
"information contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"in writing, software # distributed under the License is distributed on an \"AS",
"issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls = str(self.cls) == str(o.cls) else: eq_cls = self.cls",
"\"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name: str, ): self.cls = cls self.parent",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"from typing import Dict from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no",
"# noqa: C901 # Literal.__eq__ doesn't support dict's so # we force the",
"import Any from typing import Dict from typing import TYPE_CHECKING if TYPE_CHECKING: #",
"in [ self.parent == o.parent, self.local_state == o.local_state, self.parameter_name == o.parameter_name, ] )",
"# pragma: no cover from adorn.unit.complex import _UnitT class Parameter: \"\"\"State for a",
"Dependent): eq_cls = str(self.cls) == str(o.cls) else: eq_cls = self.cls == o.cls return",
"(str): name of the parameter in the constructor \"\"\" def __init__( self, cls:",
"for i in [ self.parent == o.parent, self.local_state == o.local_state, self.parameter_name == o.parameter_name,",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"you may not use this file except in compliance with the License. #",
"the specific language governing permissions and # limitations under the License. \"\"\"State from",
"constructs that utilize the information contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj`",
"typing import Dict from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover",
"<gh_stars>1-10 # Copyright 2021 <NAME> # # Licensed under the Apache License, Version",
"the underlying parameter parent (_UnitT): The type of object whose constructor requires an",
"\"\"\"State for a parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside",
"the constructor.\"\"\" from typing import Any from typing import Dict from typing import",
"given constructor parameter_name (str): name of the parameter in the constructor \"\"\" def",
"# we force the check # prevent circular import from adorn.unit.parameter_value import Dependent",
"(_UnitT): The type of object whose constructor requires an object of type ``cls``",
"if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls = str(self.cls) == str(o.cls) else: eq_cls =",
"= getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls, \"__args__\", None) def __eq__(self, o: object)",
"import Dict from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from",
":class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type of the underlying",
"o.local_state, self.parameter_name == o.parameter_name, ] ) if not normal_args: return False eq_cls =",
"use this file except in compliance with the License. # You may obtain",
"pragma: no cover from adorn.unit.complex import _UnitT class Parameter: \"\"\"State for a parameter",
"from typing import Any from typing import Dict from typing import TYPE_CHECKING if",
"= all( i for i in [ self.parent == o.parent, self.local_state == o.local_state,",
"requires an object of type ``cls`` local_state (Dict[str, Any]): Information about other arguments",
"o.parameter_name, ] ) if not normal_args: return False eq_cls = False if issubclass(getattr(self.cls,",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"type of the underlying parameter parent (_UnitT): The type of object whose constructor",
"object) -> bool: # noqa: C901 # Literal.__eq__ doesn't support dict's so #",
"language governing permissions and # limitations under the License. \"\"\"State from the constructor",
"from the constructor.\"\"\" from typing import Any from typing import Dict from typing",
"local_state (Dict[str, Any]): Information about other arguments provided to the given constructor parameter_name",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"that utilize the information contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` -",
"a parameter from the constructor.\"\"\" from typing import Any from typing import Dict",
"the given constructor parameter_name (str): name of the parameter in the constructor \"\"\"",
"constructor requires an object of type ``cls`` local_state (Dict[str, Any]): Information about other",
"perform some action. Examples of constructs that utilize the information contained in ``Parameter``",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"Literal.__eq__ doesn't support dict's so # we force the check # prevent circular",
"for the specific language governing permissions and # limitations under the License. \"\"\"State",
"for a parameter from the constructor.\"\"\" from typing import Any from typing import",
"eq_cls = str(self.cls) == str(o.cls) else: eq_cls = self.cls == o.cls return eq_cls",
"# # Unless required by applicable law or agreed to in writing, software",
"- :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type of the underlying parameter",
"express or implied. # See the License for the specific language governing permissions",
"self.args = getattr(self.cls, \"__args__\", None) def __eq__(self, o: object) -> bool: # noqa:",
"either express or implied. # See the License for the specific language governing",
"__eq__(self, o: object) -> bool: # noqa: C901 # Literal.__eq__ doesn't support dict's",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"\"__origin__\", int), Dependent): eq_cls = str(self.cls) == str(o.cls) else: eq_cls = self.cls ==",
"# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0",
"``obj``, to be utilized, to perform some action. Examples of constructs that utilize",
"an object of type ``cls`` local_state (Dict[str, Any]): Information about other arguments provided",
"False eq_cls = False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls = str(self.cls) ==",
"self.parameter_name = parameter_name self.origin = getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls, \"__args__\", None)",
"the License. # You may obtain a copy of the License at #",
"-> bool: # noqa: C901 # Literal.__eq__ doesn't support dict's so # we",
"the type of the underlying parameter parent (_UnitT): The type of object whose",
"str, ): self.cls = cls self.parent = parent self.local_state = local_state self.parameter_name =",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside of its ``obj``, to",
"def __init__( self, cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name: str, ):",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"self.local_state == o.local_state, self.parameter_name == o.parameter_name, ] ) if not normal_args: return False",
"Attributes: cls (_UnitT): the type of the underlying parameter parent (_UnitT): The type",
"information outside of its ``obj``, to be utilized, to perform some action. Examples",
"# Literal.__eq__ doesn't support dict's so # we force the check # prevent",
"with the License. # You may obtain a copy of the License at",
"for a parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside of",
"constructor for a parameter from the constructor.\"\"\" from typing import Any from typing",
"Dict from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from adorn.unit.complex",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"= local_state self.parameter_name = parameter_name self.origin = getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls,",
"type ``cls`` local_state (Dict[str, Any]): Information about other arguments provided to the given",
"constructor parameter_name (str): name of the parameter in the constructor \"\"\" def __init__(",
"parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside of its ``obj``,",
"law or agreed to in writing, software # distributed under the License is",
"the License for the specific language governing permissions and # limitations under the",
"i for i in [ self.parent == o.parent, self.local_state == o.local_state, self.parameter_name ==",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"parent (_UnitT): The type of object whose constructor requires an object of type",
"force the check # prevent circular import from adorn.unit.parameter_value import Dependent if not",
"Examples of constructs that utilize the information contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck`",
"to the given constructor parameter_name (str): name of the parameter in the constructor",
"\"\"\"State from the constructor for a parameter from the constructor.\"\"\" from typing import",
"self.origin = getattr(self.cls, \"__origin__\", None) self.args = getattr(self.cls, \"__args__\", None) def __eq__(self, o:",
"in compliance with the License. # You may obtain a copy of the",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from adorn.unit.complex import _UnitT class Parameter:",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"import from adorn.unit.parameter_value import Dependent if not isinstance(o, Parameter): return False normal_args =",
"import _UnitT class Parameter: \"\"\"State for a parameter from a given :class:`~adorn.data.constructor.Constructor` ``Parameter``",
"a given :class:`~adorn.data.constructor.Constructor` ``Parameter`` allows for information outside of its ``obj``, to be",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"if not isinstance(o, Parameter): return False normal_args = all( i for i in",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"Information about other arguments provided to the given constructor parameter_name (str): name of",
"import TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from adorn.unit.complex import _UnitT class",
"__init__( self, cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name: str, ): self.cls",
"allows for information outside of its ``obj``, to be utilized, to perform some",
"False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls = str(self.cls) == str(o.cls) else: eq_cls",
"2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"about other arguments provided to the given constructor parameter_name (str): name of the",
"utilize the information contained in ``Parameter`` include: - :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion`",
"Any from typing import Dict from typing import TYPE_CHECKING if TYPE_CHECKING: # pragma:",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"self.cls = cls self.parent = parent self.local_state = local_state self.parameter_name = parameter_name self.origin",
"int), Dependent): eq_cls = str(self.cls) == str(o.cls) else: eq_cls = self.cls == o.cls",
"the constructor for a parameter from the constructor.\"\"\" from typing import Any from",
"the check # prevent circular import from adorn.unit.parameter_value import Dependent if not isinstance(o,",
"so # we force the check # prevent circular import from adorn.unit.parameter_value import",
"of type ``cls`` local_state (Dict[str, Any]): Information about other arguments provided to the",
"isinstance(o, Parameter): return False normal_args = all( i for i in [ self.parent",
"its ``obj``, to be utilized, to perform some action. Examples of constructs that",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"be utilized, to perform some action. Examples of constructs that utilize the information",
"The type of object whose constructor requires an object of type ``cls`` local_state",
"doesn't support dict's so # we force the check # prevent circular import",
"parent self.local_state = local_state self.parameter_name = parameter_name self.origin = getattr(self.cls, \"__origin__\", None) self.args",
"- :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type of the underlying parameter parent (_UnitT):",
"- :class:`~adorn.unit.parameter_value.DependentTypeCheck` - :class:`~adorn.unit.parameter_value.DependentFromObj` - :class:`~adorn.unit.parameter_value.DependentUnion` Attributes: cls (_UnitT): the type of the",
"self.parent == o.parent, self.local_state == o.local_state, self.parameter_name == o.parameter_name, ] ) if not",
"= parent self.local_state = local_state self.parameter_name = parameter_name self.origin = getattr(self.cls, \"__origin__\", None)",
"self.parameter_name == o.parameter_name, ] ) if not normal_args: return False eq_cls = False",
"parameter from the constructor.\"\"\" from typing import Any from typing import Dict from",
"Dependent if not isinstance(o, Parameter): return False normal_args = all( i for i",
"# prevent circular import from adorn.unit.parameter_value import Dependent if not isinstance(o, Parameter): return",
"self.parent = parent self.local_state = local_state self.parameter_name = parameter_name self.origin = getattr(self.cls, \"__origin__\",",
"(Dict[str, Any]): Information about other arguments provided to the given constructor parameter_name (str):",
"== str(o.cls) else: eq_cls = self.cls == o.cls return eq_cls def __str__(self): return",
"o: object) -> bool: # noqa: C901 # Literal.__eq__ doesn't support dict's so",
"the License. \"\"\"State from the constructor for a parameter from the constructor.\"\"\" from",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"from the constructor for a parameter from the constructor.\"\"\" from typing import Any",
"Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the",
"constructor \"\"\" def __init__( self, cls: \"_UnitT\", parent: \"_UnitT\", local_state: Dict[str, Any], parameter_name:",
"eq_cls = False if issubclass(getattr(self.cls, \"__origin__\", int), Dependent): eq_cls = str(self.cls) == str(o.cls)"
] |
[
"previous = 0 current = 1 for _ in range(n-2): previous, current =",
"# Expected only one integer as input input = sys.stdin.read() n = int(input)",
"1 for _ in range(n-2): previous, current = current, previous + current return",
"for _ in range(n-2): previous, current = current, previous + current return current",
"current, previous + current return current if __name__ == '__main__': # Expected only",
"'__main__': # Expected only one integer as input input = sys.stdin.read() n =",
"== 1: return 0 previous = 0 current = 1 for _ in",
"if n == 2: return 1 elif n == 1: return 0 previous",
"= current, previous + current return current if __name__ == '__main__': # Expected",
"import sys def getNthFib(n): if n == 2: return 1 elif n ==",
"= 1 for _ in range(n-2): previous, current = current, previous + current",
"if __name__ == '__main__': # Expected only one integer as input input =",
"return 1 elif n == 1: return 0 previous = 0 current =",
"0 current = 1 for _ in range(n-2): previous, current = current, previous",
"Expected only one integer as input input = sys.stdin.read() n = int(input) print(getNthFib(n))",
"1: return 0 previous = 0 current = 1 for _ in range(n-2):",
"getNthFib(n): if n == 2: return 1 elif n == 1: return 0",
"return current if __name__ == '__main__': # Expected only one integer as input",
"current = current, previous + current return current if __name__ == '__main__': #",
"n == 1: return 0 previous = 0 current = 1 for _",
"python3 import sys def getNthFib(n): if n == 2: return 1 elif n",
"sys def getNthFib(n): if n == 2: return 1 elif n == 1:",
"#use python3 import sys def getNthFib(n): if n == 2: return 1 elif",
"2: return 1 elif n == 1: return 0 previous = 0 current",
"range(n-2): previous, current = current, previous + current return current if __name__ ==",
"previous + current return current if __name__ == '__main__': # Expected only one",
"return 0 previous = 0 current = 1 for _ in range(n-2): previous,",
"__name__ == '__main__': # Expected only one integer as input input = sys.stdin.read()",
"== 2: return 1 elif n == 1: return 0 previous = 0",
"= 0 current = 1 for _ in range(n-2): previous, current = current,",
"in range(n-2): previous, current = current, previous + current return current if __name__",
"+ current return current if __name__ == '__main__': # Expected only one integer",
"_ in range(n-2): previous, current = current, previous + current return current if",
"current if __name__ == '__main__': # Expected only one integer as input input",
"previous, current = current, previous + current return current if __name__ == '__main__':",
"def getNthFib(n): if n == 2: return 1 elif n == 1: return",
"0 previous = 0 current = 1 for _ in range(n-2): previous, current",
"== '__main__': # Expected only one integer as input input = sys.stdin.read() n",
"n == 2: return 1 elif n == 1: return 0 previous =",
"current = 1 for _ in range(n-2): previous, current = current, previous +",
"current return current if __name__ == '__main__': # Expected only one integer as",
"elif n == 1: return 0 previous = 0 current = 1 for",
"1 elif n == 1: return 0 previous = 0 current = 1"
] |
[
"degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ###################################################################### # Let's visualize",
"sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup, hole, groove = geom.fragment([sub, sup,",
"Let's create the geometry using the :class:`~gyptis.Layered` # class: geom = gy.Layered(3, (dx,",
"print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f} \") print(f\" sum {T_ref['TE'][1]:.4f}",
"** 0.5, \"groove\": pmesh * abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\":",
"Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot)",
"= gy.Layered(3, (dx, dy), thicknesses) z0 = geom.z_position[\"groove\"] # + h/10 # l_pillar",
"the simulation class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree)",
"geom.domains} mu = {d: 1 for d in geom.domains} epsilon[\"groove\"] = eps_layer #",
"* pmesh * eps_diel ** 0.5, \"substrate\": pmesh * eps_diel ** 0.5, \"groove\":",
"= eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel ###################################################################### # Now we can",
"import os # os.system(\"paraview test.pvd\") # xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs)",
"agreement with the reference print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0",
"hole, groove = geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\")",
"R_hole = 0.25 hole = geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole) #",
"\"groove\": 1 * h, \"superstrate\": lambda0 / 1, \"pml_top\": lambda0, } ) ##############################################################################",
"periods of the grating h = 0.05 theta0 = 0 phi0 = 0",
"ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \")",
"(dx, dy), thicknesses) z0 = geom.z_position[\"groove\"] # + h/10 # l_pillar = 0.9",
"# pillar = geom.add_box(-l_pillar / 2, -l_pillar / 2, z0, l_pillar, l_pillar, h)",
"* 2 ** 0.5 / 2 R_hole = 0.25 hole = geom.add_cylinder(0, 0,",
"0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We switch",
"the reference print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f}",
"(TE)\") fig.tight_layout() fig.show() ###################################################################### # Results are in good agreement with the reference",
"# gy.dolfin.File(\"test.pvd\") << pp.real # # import os # os.system(\"paraview test.pvd\") # xsx",
"1, \"groove\": 1 * h, \"superstrate\": lambda0 / 1, \"pml_top\": lambda0, } )",
"parameters, in order to be able to have # ``parmesh`` cells per wavelength",
"eps_layer = 0.8125 - 5.2500j ############################################################################## # The thicknesses of the different layers",
"in geom.domains} mu = {d: 1 for d in geom.domains} epsilon[\"groove\"] = eps_layer",
"0.8125 - 5.2500j ############################################################################## # The thicknesses of the different layers are specified",
"= gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real # # import os # os.system(\"paraview",
"# pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview test.pvd\") xs",
"create the geometry using the :class:`~gyptis.Layered` # class: geom = gy.Layered(3, (dx, dy),",
"# class: geom = gy.Layered(3, (dx, dy), thicknesses) z0 = geom.z_position[\"groove\"] # +",
"# pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] #",
"= 0.12860 T = 0.17486 T = 0.12860 T = 0.06196 T =",
"the various domains # using a dictionary: epsilon = {d: 1 for d",
"= pmesh * 1 mesh_param = dict( { \"pml_bottom\": 1 * pmesh *",
"<filename>examples/diffraction/hole_crossed_grating.py # -*- coding: utf-8 -*- \"\"\" 3D Checkerboard Grating ======================= Example of",
"ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f} \") print(f\" sum {T_ref['TE'][1]:.4f} {effs_TM['B']:.4f} \")",
"permeabilities for the various domains # using a dictionary: epsilon = {d: 1",
"fig.show() ###################################################################### # Results are in good agreement with the reference print(\"Transmission coefficient\")",
"hole = geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole) # pillar = geom.add_box(-l_pillar",
"sup, hole, groove = geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub,",
"of the grating h = 0.05 theta0 = 0 phi0 = 0 psi0",
"geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set the permittivity and permeabilities for the various",
"# sphinx_gallery_thumbnail_number = 2 from collections import OrderedDict import matplotlib.pyplot as plt import",
"/ 2, z0, l_pillar, l_pillar, h) # geom.rotate(pillar, (0, 0, 0), (0, 0,",
"a dielectric bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number = 2 from collections import",
"gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the fields fig, ax = plt.subplots(1, 2) ylim",
"= eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel ######################################################################",
"sphinx_gallery_thumbnail_number = 2 from collections import OrderedDict import matplotlib.pyplot as plt import numpy",
"dim=3, degree=degree) grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp =",
"0.12860 T = 0.17486 T = 0.12860 T = 0.06196 T = 0.12860",
"# geom.build(interactive=1) ###################################################################### # Set the permittivity and permeabilities for the various domains",
"pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") <<",
"d in geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1",
"pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 * pmesh, } ) ############################################################################## # Let's create",
"gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H =",
"\"substrate\": lambda0 / 1, \"groove\": 1 * h, \"superstrate\": lambda0 / 1, \"pml_top\":",
"gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### # Results are in",
"gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is the",
"fig, ax = plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off()",
"gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor # pp",
"print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f} \") print(f\"",
"0.9 * dx * 2 ** 0.5 / 2 R_hole = 0.25 hole",
"5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is the same as",
"E = grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] # E",
"= 0.04308 T = 0.12860 T = 0.06196 T = 0.12860 T =",
"some # geometrical and optical parameters: lambda0 = 0.5 dx = dy =",
"in :cite:p:`Demesy2010`. # # The units of lengths are in nanometers here, and",
"0, 0, h, R_hole) # pillar = geom.add_box(-l_pillar / 2, -l_pillar / 2,",
"l_pillar, h) # geom.rotate(pillar, (0, 0, 0), (0, 0, 1), np.pi / 4)",
"2, -l_pillar / 2, z0, l_pillar, l_pillar, h) # geom.rotate(pillar, (0, 0, 0),",
"mesh refinement parameters, in order to be able to have # ``parmesh`` cells",
"Set the permittivity and permeabilities for the various domains # using a dictionary:",
"# The units of lengths are in nanometers here, and we first define",
"to top**: thicknesses = OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0 / 1, \"groove\":",
"psi0 = gy.pi / 4 eps_diel = 2.25 eps_layer = 0.8125 - 5.2500j",
"* dx * 2 ** 0.5 / 2 R_hole = 0.25 hole =",
"coding: utf-8 -*- \"\"\" 3D Checkerboard Grating ======================= Example of a dielectric bi-periodic",
"\"\"\" # sphinx_gallery_thumbnail_number = 2 from collections import OrderedDict import matplotlib.pyplot as plt",
"define some # geometrical and optical parameters: lambda0 = 0.5 dx = dy",
"= gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H",
"0 psi0 = gy.pi / 4 eps_diel = 2.25 eps_layer = 0.8125 -",
"geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set the permittivity and permeabilities for the",
"Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview test.pvd\") xs ### reference T_ref =",
"groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d:",
"gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") <<",
"eps_diel ###################################################################### # Now we can create an instance of the simulation class",
"top**: thicknesses = OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0 / 1, \"groove\": 1",
"lambda0 / 1, \"groove\": 1 * h, \"superstrate\": lambda0 / 1, \"pml_top\": lambda0,",
"pmesh * 1 mesh_param = dict( { \"pml_bottom\": 1 * pmesh * eps_diel",
"###################################################################### # Set the permittivity and permeabilities for the various domains # using",
"visualize the fields fig, ax = plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0])",
"\"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is the same as in :cite:p:`Demesy2010`. # #",
"parameters: lambda0 = 0.5 dx = dy = 1 # 5 * lambda0",
"(TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### # Results are",
"2 R_hole = 0.25 hole = geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole)",
"for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### #",
"to be able to have # ``parmesh`` cells per wavelength of the field",
"# gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is the same as in",
"geom = gy.Layered(3, (dx, dy), thicknesses) z0 = geom.z_position[\"groove\"] # + h/10 #",
"* 1 mesh_param = dict( { \"pml_bottom\": 1 * pmesh * eps_diel **",
"= 2.25 eps_layer = 0.8125 - 5.2500j ############################################################################## # The thicknesses of the",
"1 * pmesh, } ) ############################################################################## # Let's create the geometry using the",
"print(effs) xssx E = grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"]",
"{ \"pml_bottom\": 1 * pmesh * eps_diel ** 0.5, \"substrate\": pmesh * eps_diel",
"1 for d in geom.domains} mu = {d: 1 for d in geom.domains}",
"gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the fields fig, ax",
"grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E = grating.solution[\"total\"] # E =",
"############################################################################## # Here we set the mesh refinement parameters, in order to be",
"# # The units of lengths are in nanometers here, and we first",
"pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree) grating = gy.Grating(geom, epsilon, mu,",
"T = 0.12860 T = 0.04308 fmm = {} print(\"Transmission coefficient\") print(\" order",
"a dictionary: epsilon = {d: 1 for d in geom.domains} mu = {d:",
"plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1])",
"coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum",
"{d: lambda0 / param for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0)",
"``OrderedDict`` object **from bottom to top**: thicknesses = OrderedDict( { \"pml_bottom\": lambda0, \"substrate\":",
"h/10 # l_pillar = 0.9 * dx * 2 ** 0.5 / 2",
"** 0.5 / 2 R_hole = 0.25 hole = geom.add_cylinder(0, 0, z0, 0,",
"set the mesh refinement parameters, in order to be able to have #",
"0.25 hole = geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole) # pillar =",
"z0, 0, 0, h, R_hole) # pillar = geom.add_box(-l_pillar / 2, -l_pillar /",
"import matplotlib.pyplot as plt import numpy as np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"]",
"h, \"superstrate\": lambda0 / 1, \"pml_top\": lambda0, } ) ############################################################################## # Here we",
"} ) ############################################################################## # Here we set the mesh refinement parameters, in order",
"gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True)",
"\"superstrate\": lambda0 / 1, \"pml_top\": lambda0, } ) ############################################################################## # Here we set",
"* eps_diel ** 0.5, \"groove\": pmesh * abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\":",
"os.system(\"paraview test.pvd\") # xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E =",
"permittivity and permeabilities for the various domains # using a dictionary: epsilon =",
"epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel ###################################################################### # Now we can create an",
"os os.system(\"paraview test.pvd\") xs ### reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T",
"Grating ======================= Example of a dielectric bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number =",
"= gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree) grating = gy.Grating(geom, epsilon, mu, source=pw,",
"# 5 * lambda0 * 2 ** 0.5 / 4 # periods of",
"mu = {d: 1 for d in geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"]",
"2 ** 0.5 / 2 R_hole = 0.25 hole = geom.add_cylinder(0, 0, z0,",
"mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set the permittivity and",
"E = grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os",
"domains # using a dictionary: epsilon = {d: 1 for d in geom.domains}",
"the same as in :cite:p:`Demesy2010`. # # The units of lengths are in",
"H = gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the fields fig, ax = plt.subplots(1,",
"layers are specified with an # ``OrderedDict`` object **from bottom to top**: thicknesses",
"import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ##############################################################################",
"l_pillar = 0.9 * dx * 2 ** 0.5 / 2 R_hole =",
"# l_pillar = 0.9 * dx * 2 ** 0.5 / 2 R_hole",
"geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0 / param for d, param",
"# geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set the permittivity and permeabilities for",
"4) groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup, hole,",
"ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### #",
"sup = geom.layers[\"superstrate\"] sub, sup, hole, groove = geom.fragment([sub, sup, groove], hole) geom.add_physical(hole,",
"groove = geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup,",
"grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E =",
"ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim)",
"\"pml_top\": 1 * pmesh, } ) ############################################################################## # Let's create the geometry using",
"= grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real # #",
"OrderedDict import matplotlib.pyplot as plt import numpy as np import gyptis as gy",
"# The thicknesses of the different layers are specified with an # ``OrderedDict``",
"# We switch to TE polarization gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\",",
"thicknesses) z0 = geom.z_position[\"groove\"] # + h/10 # l_pillar = 0.9 * dx",
"the mesh refinement parameters, in order to be able to have # ``parmesh``",
"= {d: 1 for d in geom.domains} mu = {d: 1 for d",
"# Structure is the same as in :cite:p:`Demesy2010`. # # The units of",
"3 pmesh_hole = pmesh * 1 mesh_param = dict( { \"pml_bottom\": 1 *",
"= 1 # 5 * lambda0 * 2 ** 0.5 / 4 #",
"pillar = geom.add_box(-l_pillar / 2, -l_pillar / 2, z0, l_pillar, l_pillar, h) #",
"able to have # ``parmesh`` cells per wavelength of the field inside each",
"subdomain degree = 2 pmesh = 3 pmesh_hole = pmesh * 1 mesh_param",
"here, and we first define some # geometrical and optical parameters: lambda0 =",
"orders=True) print(effs) xssx E = grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] # E =",
"{effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We switch to TE",
"lambda0 / 1, \"pml_top\": lambda0, } ) ############################################################################## # Here we set the",
"the :class:`~gyptis.Layered` # class: geom = gy.Layered(3, (dx, dy), thicknesses) z0 = geom.z_position[\"groove\"]",
"= grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E = grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] #",
"epsilon[\"substrate\"] = eps_diel ###################################################################### # Now we can create an instance of the",
"groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup, hole, groove",
"} ) ############################################################################## # Let's create the geometry using the :class:`~gyptis.Layered` # class:",
"###################################################################### # We switch to TE polarization gratingTM = gy.Grating(geom, epsilon, mu, source=pw,",
"We switch to TE polarization gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2)",
"======================= Example of a dielectric bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number = 2",
"= 0.05 theta0 = 0 phi0 = 0 psi0 = gy.pi / 4",
"<< pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\")",
"object **from bottom to top**: thicknesses = OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0",
"gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### # Results are in good",
"= 2 pmesh = 3 pmesh_hole = pmesh * 1 mesh_param = dict(",
"TE polarization gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM =",
"geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup, hole, groove = geom.fragment([sub,",
"gy.Layered(3, (dx, dy), thicknesses) z0 = geom.z_position[\"groove\"] # + h/10 # l_pillar =",
"in order to be able to have # ``parmesh`` cells per wavelength of",
"# os.system(\"paraview test.pvd\") # xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E",
"\"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0 / param",
"bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number = 2 from collections import OrderedDict import",
"<< pp.real import os os.system(\"paraview test.pvd\") xs ### reference T_ref = dict(TM=[0.2070, 1.0001],",
"gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout()",
"the grating h = 0.05 theta0 = 0 phi0 = 0 psi0 =",
"pp.real # # import os # os.system(\"paraview test.pvd\") # xsx grating.solve() effs =",
"geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0 / param for d, param in mesh_param.items()}",
"= grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot =",
"\"superstrate\") mesh_size = {d: lambda0 / param for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size)",
"ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### # Results",
"geometrical and optical parameters: lambda0 = 0.5 dx = dy = 1 #",
"T = 0.04308 T = 0.12860 T = 0.06196 T = 0.12860 T",
"grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space)",
"= 0.04308 fmm = {} print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\"",
"each subdomain degree = 2 pmesh = 3 pmesh_hole = pmesh * 1",
"print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We",
"the field inside each subdomain degree = 2 pmesh = 3 pmesh_hole =",
"to have # ``parmesh`` cells per wavelength of the field inside each subdomain",
"is the same as in :cite:p:`Demesy2010`. # # The units of lengths are",
"gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the",
"of the field inside each subdomain degree = 2 pmesh = 3 pmesh_hole",
"``parmesh`` cells per wavelength of the field inside each subdomain degree = 2",
"specified with an # ``OrderedDict`` object **from bottom to top**: thicknesses = OrderedDict(",
"as in :cite:p:`Demesy2010`. # # The units of lengths are in nanometers here,",
"ax = plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$",
"lambda0 / param for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) #",
"gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree) grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree,",
"* eps_diel ** 0.5, \"substrate\": pmesh * eps_diel ** 0.5, \"groove\": pmesh *",
"T = 0.06196 T = 0.12860 T = 0.04308 fmm = {} print(\"Transmission",
"eps_diel ** 0.5, \"substrate\": pmesh * eps_diel ** 0.5, \"groove\": pmesh * abs(eps_layer)",
"lambda0, \"substrate\": lambda0 / 1, \"groove\": 1 * h, \"superstrate\": lambda0 / 1,",
"as np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\"",
"= gy.pi / 4 eps_diel = 2.25 eps_layer = 0.8125 - 5.2500j ##############################################################################",
"\"superstrate\": pmesh, \"pml_top\": 1 * pmesh, } ) ############################################################################## # Let's create the",
"2.25 eps_layer = 0.8125 - 5.2500j ############################################################################## # The thicknesses of the different",
"0.12860 T = 0.06196 T = 0.12860 T = 0.17486 T = 0.12860",
"/ 1, \"groove\": 1 * h, \"superstrate\": lambda0 / 1, \"pml_top\": lambda0, }",
"grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) #",
"= 0 psi0 = gy.pi / 4 eps_diel = 2.25 eps_layer = 0.8125",
"thicknesses of the different layers are specified with an # ``OrderedDict`` object **from",
"dy), thicknesses) z0 = geom.z_position[\"groove\"] # + h/10 # l_pillar = 0.9 *",
"gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview test.pvd\") xs ### reference T_ref = dict(TM=[0.2070,",
"epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel",
"0, z0, 0, 0, h, R_hole) # pillar = geom.add_box(-l_pillar / 2, -l_pillar",
"geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] =",
"T = 0.12860 T = 0.06196 T = 0.12860 T = 0.04308 fmm",
"ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### # Results are in good agreement with",
"= geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\")",
"are in good agreement with the reference print(\"Transmission coefficient\") print(\" order ref calc\")",
"-*- \"\"\" 3D Checkerboard Grating ======================= Example of a dielectric bi-periodic diffraction grating.",
"0.5 / 4 # periods of the grating h = 0.05 theta0 =",
"= geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off()",
"# Now we can create an instance of the simulation class # :class:`~gyptis.Grating`,",
"xssx E = grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] #",
"print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\"",
"print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f}",
"geometry using the :class:`~gyptis.Layered` # class: geom = gy.Layered(3, (dx, dy), thicknesses) z0",
"The units of lengths are in nanometers here, and we first define some",
"<< pp.real # # import os # os.system(\"paraview test.pvd\") # xsx grating.solve() effs",
"gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is",
"epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel ###################################################################### # Now we",
"dielectric bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number = 2 from collections import OrderedDict",
"# epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel ###################################################################### # Now",
"eps_diel = 2.25 eps_layer = 0.8125 - 5.2500j ############################################################################## # The thicknesses of",
"* h, \"superstrate\": lambda0 / 1, \"pml_top\": lambda0, } ) ############################################################################## # Here",
"# import os # os.system(\"paraview test.pvd\") # xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True)",
"= gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp =",
"collections import OrderedDict import matplotlib.pyplot as plt import numpy as np import gyptis",
"4 eps_diel = 2.25 eps_layer = 0.8125 - 5.2500j ############################################################################## # The thicknesses",
"###################################################################### # Now we can create an instance of the simulation class #",
"= grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E,",
"different layers are specified with an # ``OrderedDict`` object **from bottom to top**:",
"reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T = 0.04308 T = 0.12860",
"geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\")",
"= gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the fields fig,",
"good agreement with the reference print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\"",
"we can create an instance of the simulation class # :class:`~gyptis.Grating`, pw =",
"= geom.z_position[\"groove\"] # + h/10 # l_pillar = 0.9 * dx * 2",
"/ param for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1)",
"pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real # # import os #",
"an # ``OrderedDict`` object **from bottom to top**: thicknesses = OrderedDict( { \"pml_bottom\":",
"(theta0, phi0, psi0), dim=3, degree=degree) grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8)",
"sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We switch to TE polarization gratingTM =",
"geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set the permittivity and permeabilities",
"grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview test.pvd\")",
"and optical parameters: lambda0 = 0.5 dx = dy = 1 # 5",
"= 0.8125 - 5.2500j ############################################################################## # The thicknesses of the different layers are",
"(0, 0, 1), np.pi / 4) groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup",
"1 mesh_param = dict( { \"pml_bottom\": 1 * pmesh * eps_diel ** 0.5,",
"using a dictionary: epsilon = {d: 1 for d in geom.domains} mu =",
"= gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor #",
"gy.dolfin.File(\"test.pvd\") << pp.real # # import os # os.system(\"paraview test.pvd\") # xsx grating.solve()",
"# E = grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp",
"os.system(\"paraview test.pvd\") xs ### reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T =",
"\"pml_bottom\": 1 * pmesh * eps_diel ** 0.5, \"substrate\": pmesh * eps_diel **",
"= 0.17486 T = 0.12860 T = 0.06196 T = 0.12860 T =",
"0.12860 T = 0.06196 T = 0.12860 T = 0.04308 fmm = {}",
"mesh_size = {d: lambda0 / param for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) #",
"are in nanometers here, and we first define some # geometrical and optical",
"0, 0), (0, 0, 1), np.pi / 4) groove = geom.layers[\"groove\"] sub =",
"and permeabilities for the various domains # using a dictionary: epsilon = {d:",
"= 0.12860 T = 0.06196 T = 0.12860 T = 0.04308 fmm =",
"ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ######################################################################",
"= 1 epsilon[\"substrate\"] = eps_diel ###################################################################### # Now we can create an instance",
"gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") <<",
"of lengths are in nanometers here, and we first define some # geometrical",
"dict( { \"pml_bottom\": 1 * pmesh * eps_diel ** 0.5, \"substrate\": pmesh *",
"0.5 dx = dy = 1 # 5 * lambda0 * 2 **",
"mesh_param = dict( { \"pml_bottom\": 1 * pmesh * eps_diel ** 0.5, \"substrate\":",
"gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview test.pvd\") xs ### reference T_ref",
"dx = dy = 1 # 5 * lambda0 * 2 ** 0.5",
"dy = 1 # 5 * lambda0 * 2 ** 0.5 / 4",
"0.04308 fmm = {} print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0",
"geom.z_position[\"groove\"] # + h/10 # l_pillar = 0.9 * dx * 2 **",
"# E = grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import",
"mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real #",
"ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### # Results are in good agreement",
"1 for d in geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"]",
"\"substrate\": pmesh * eps_diel ** 0.5, \"groove\": pmesh * abs(eps_layer) ** 0.5, \"hole\":",
"\") ###################################################################### # We switch to TE polarization gratingTM = gy.Grating(geom, epsilon, mu,",
"= dict( { \"pml_bottom\": 1 * pmesh * eps_diel ** 0.5, \"substrate\": pmesh",
"{effs_TE['B']:.4f} \") ###################################################################### # We switch to TE polarization gratingTM = gy.Grating(geom, epsilon,",
"{d: 1 for d in geom.domains} mu = {d: 1 for d in",
"# using a dictionary: epsilon = {d: 1 for d in geom.domains} mu",
"test.pvd\") # xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E = grating.solution[\"total\"]",
"simulation class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree) grating",
"gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space)",
"# Set the permittivity and permeabilities for the various domains # using a",
"pp.real import os os.system(\"paraview test.pvd\") xs ### reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187,",
"be able to have # ``parmesh`` cells per wavelength of the field inside",
"thicknesses = OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0 / 1, \"groove\": 1 *",
"= grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview",
"eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel ###################################################################### #",
"for d in geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] =",
"eps_diel ** 0.5, \"groove\": pmesh * abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh,",
"geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup, hole, groove = geom.fragment([sub, sup, groove], hole)",
"d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set",
"* abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 * pmesh, }",
"TE=[0.8187, 1.0001]) T = 0.04308 T = 0.12860 T = 0.06196 T =",
"orders=True) H = gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the fields fig, ax =",
"h = 0.05 theta0 = 0 phi0 = 0 psi0 = gy.pi /",
"degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real # us =",
"0.5, \"groove\": pmesh * abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1",
"d in geom.domains} mu = {d: 1 for d in geom.domains} epsilon[\"groove\"] =",
"geom.add_box(-l_pillar / 2, -l_pillar / 2, z0, l_pillar, l_pillar, h) # geom.rotate(pillar, (0,",
"source=pw, degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real # us",
"for the various domains # using a dictionary: epsilon = {d: 1 for",
"diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number = 2 from collections import OrderedDict import matplotlib.pyplot",
"0.06196 T = 0.12860 T = 0.17486 T = 0.12860 T = 0.06196",
"-l_pillar / 2, z0, l_pillar, l_pillar, h) # geom.rotate(pillar, (0, 0, 0), (0,",
"# ``parmesh`` cells per wavelength of the field inside each subdomain degree =",
"for d in geom.domains} mu = {d: 1 for d in geom.domains} epsilon[\"groove\"]",
"lambda0 * 2 ** 0.5 / 4 # periods of the grating h",
"# + h/10 # l_pillar = 0.9 * dx * 2 ** 0.5",
"2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1])",
"as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure",
"cells per wavelength of the field inside each subdomain degree = 2 pmesh",
"pmesh * eps_diel ** 0.5, \"groove\": pmesh * abs(eps_layer) ** 0.5, \"hole\": pmesh_hole,",
"pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview test.pvd\") xs ###",
"grating. \"\"\" # sphinx_gallery_thumbnail_number = 2 from collections import OrderedDict import matplotlib.pyplot as",
"epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"]",
"4 # periods of the grating h = 0.05 theta0 = 0 phi0",
"= grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] # E =",
":cite:p:`Demesy2010`. # # The units of lengths are in nanometers here, and we",
"2 from collections import OrderedDict import matplotlib.pyplot as plt import numpy as np",
"= gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real import os os.system(\"paraview test.pvd\") xs ### reference",
"sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size =",
"geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$",
"from collections import OrderedDict import matplotlib.pyplot as plt import numpy as np import",
"1), np.pi / 4) groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"]",
"= 0.06196 T = 0.12860 T = 0.04308 fmm = {} print(\"Transmission coefficient\")",
"polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ###################################################################### # Let's",
"dx * 2 ** 0.5 / 2 R_hole = 0.25 hole = geom.add_cylinder(0,",
"E = grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp =",
"with the reference print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f}",
"# # gy.dolfin.File(\"test.pvd\") << pp.real # # import os # os.system(\"paraview test.pvd\") #",
"import OrderedDict import matplotlib.pyplot as plt import numpy as np import gyptis as",
"using the :class:`~gyptis.Layered` # class: geom = gy.Layered(3, (dx, dy), thicknesses) z0 =",
"E = grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot",
"= 3 pmesh_hole = pmesh * 1 mesh_param = dict( { \"pml_bottom\": 1",
"the permittivity and permeabilities for the various domains # using a dictionary: epsilon",
"= dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T = 0.04308 T = 0.12860 T =",
"can create an instance of the simulation class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0,",
"degree = 2 pmesh = 3 pmesh_hole = pmesh * 1 mesh_param =",
"an instance of the simulation class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0,",
"** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 * pmesh, } ) ##############################################################################",
"* pmesh, } ) ############################################################################## # Let's create the geometry using the :class:`~gyptis.Layered`",
"* 2 ** 0.5 / 4 # periods of the grating h =",
"Now we can create an instance of the simulation class # :class:`~gyptis.Grating`, pw",
"geom.layers[\"superstrate\"] sub, sup, hole, groove = geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove,",
"= eps_diel ###################################################################### # Now we can create an instance of the simulation",
"# us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real",
"import numpy as np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"]",
"pmesh * eps_diel ** 0.5, \"substrate\": pmesh * eps_diel ** 0.5, \"groove\": pmesh",
"optical parameters: lambda0 = 0.5 dx = dy = 1 # 5 *",
"phi0, psi0), dim=3, degree=degree) grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) #",
"Let's visualize the fields fig, ax = plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"]",
"# # import os # os.system(\"paraview test.pvd\") # xsx grating.solve() effs = grating.diffraction_efficiencies(2,",
"(0, 0, 0), (0, 0, 1), np.pi / 4) groove = geom.layers[\"groove\"] sub",
"* lambda0 * 2 ** 0.5 / 4 # periods of the grating",
"2, z0, l_pillar, l_pillar, h) # geom.rotate(pillar, (0, 0, 0), (0, 0, 1),",
"test.pvd\") xs ### reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T = 0.04308",
"5 * lambda0 * 2 ** 0.5 / 4 # periods of the",
"** 0.5 / 4 # periods of the grating h = 0.05 theta0",
"grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree)",
"The thicknesses of the different layers are specified with an # ``OrderedDict`` object",
"0.06196 T = 0.12860 T = 0.04308 fmm = {} print(\"Transmission coefficient\") print(\"",
"instance of the simulation class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0),",
"os # os.system(\"paraview test.pvd\") # xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx",
"0.17486 T = 0.12860 T = 0.06196 T = 0.12860 T = 0.04308",
"2 ** 0.5 / 4 # periods of the grating h = 0.05",
"hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0",
"coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f} \") print(f\" sum",
"nanometers here, and we first define some # geometrical and optical parameters: lambda0",
"# geom.rotate(pillar, (0, 0, 0), (0, 0, 1), np.pi / 4) groove =",
"theta0 = 0 phi0 = 0 psi0 = gy.pi / 4 eps_diel =",
"inside each subdomain degree = 2 pmesh = 3 pmesh_hole = pmesh *",
"############################################################################## # Structure is the same as in :cite:p:`Demesy2010`. # # The units",
"+ h/10 # l_pillar = 0.9 * dx * 2 ** 0.5 /",
"# Let's create the geometry using the :class:`~gyptis.Layered` # class: geom = gy.Layered(3,",
"param for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ######################################################################",
"/ 2, -l_pillar / 2, z0, l_pillar, l_pillar, h) # geom.rotate(pillar, (0, 0,",
"abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 * pmesh, } )",
"T = 0.17486 T = 0.12860 T = 0.06196 T = 0.12860 T",
"gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real # # import os # os.system(\"paraview test.pvd\")",
"z0 = geom.z_position[\"groove\"] # + h/10 # l_pillar = 0.9 * dx *",
"Results are in good agreement with the reference print(\"Transmission coefficient\") print(\" order ref",
"h, R_hole) # pillar = geom.add_box(-l_pillar / 2, -l_pillar / 2, z0, l_pillar,",
"1 epsilon[\"substrate\"] = eps_diel ###################################################################### # Now we can create an instance of",
"reference print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f} \")",
"periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"]",
"1 * h, \"superstrate\": lambda0 / 1, \"pml_top\": lambda0, } ) ############################################################################## #",
"Checkerboard Grating ======================= Example of a dielectric bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number",
"= 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is the same",
"0, h, R_hole) # pillar = geom.add_box(-l_pillar / 2, -l_pillar / 2, z0,",
"\"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0 / param for d, param in",
"fields fig, ax = plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim)",
"in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set the permittivity",
"###################################################################### # Let's visualize the fields fig, ax = plt.subplots(1, 2) ylim =",
"= gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\")",
"lengths are in nanometers here, and we first define some # geometrical and",
"\") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We switch to TE polarization",
"bottom to top**: thicknesses = OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0 / 1,",
"are specified with an # ``OrderedDict`` object **from bottom to top**: thicknesses =",
"the fields fig, ax = plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0])",
"5.2500j ############################################################################## # The thicknesses of the different layers are specified with an",
"0.5, \"substrate\": pmesh * eps_diel ** 0.5, \"groove\": pmesh * abs(eps_layer) ** 0.5,",
"= {d: lambda0 / param for d, param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates()",
"utf-8 -*- \"\"\" 3D Checkerboard Grating ======================= Example of a dielectric bi-periodic diffraction",
"effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E = grating.solution[\"total\"] # E = grating.solution[\"diffracted\"]",
"pmesh = 3 pmesh_hole = pmesh * 1 mesh_param = dict( { \"pml_bottom\":",
"eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"] = eps_diel ###################################################################### # Now we can create",
"xs ### reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T = 0.04308 T",
"pmesh, } ) ############################################################################## # Let's create the geometry using the :class:`~gyptis.Layered` #",
"first define some # geometrical and optical parameters: lambda0 = 0.5 dx =",
":class:`~gyptis.Layered` # class: geom = gy.Layered(3, (dx, dy), thicknesses) z0 = geom.z_position[\"groove\"] #",
"= 2 from collections import OrderedDict import matplotlib.pyplot as plt import numpy as",
"the different layers are specified with an # ``OrderedDict`` object **from bottom to",
"# -*- coding: utf-8 -*- \"\"\" 3D Checkerboard Grating ======================= Example of a",
"as plt import numpy as np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5",
"of a dielectric bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number = 2 from collections",
"# # E = grating.formulation.phasor # pp = gy.utils.project_iterative(E, Vplot) gy.dolfin.File(\"test.pvd\") << pp.real",
"** 0.5, \"substrate\": pmesh * eps_diel ** 0.5, \"groove\": pmesh * abs(eps_layer) **",
"= gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the fields fig, ax = plt.subplots(1, 2)",
"h) # geom.rotate(pillar, (0, 0, 0), (0, 0, 1), np.pi / 4) groove",
"order to be able to have # ``parmesh`` cells per wavelength of the",
"z0, l_pillar, l_pillar, h) # geom.rotate(pillar, (0, 0, 0), (0, 0, 1), np.pi",
"grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor # pp =",
"\"\"\" 3D Checkerboard Grating ======================= Example of a dielectric bi-periodic diffraction grating. \"\"\"",
"in good agreement with the reference print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\")",
"= geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole) # pillar = geom.add_box(-l_pillar /",
"same as in :cite:p:`Demesy2010`. # # The units of lengths are in nanometers",
"mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ######################################################################",
"= 0 phi0 = 0 psi0 = gy.pi / 4 eps_diel = 2.25",
"dictionary: epsilon = {d: 1 for d in geom.domains} mu = {d: 1",
"matplotlib.pyplot as plt import numpy as np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] =",
"np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7)",
"gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## #",
"\"groove\": pmesh * abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 *",
"T = 0.12860 T = 0.06196 T = 0.12860 T = 0.17486 T",
"# pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real # # import os",
"numpy as np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 # gy.dolfin.parameters[\"ghost_mode\"] =",
"/ 4 # periods of the grating h = 0.05 theta0 = 0",
"sub, sup, hole, groove = geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\")",
"= 0.9 * dx * 2 ** 0.5 / 2 R_hole = 0.25",
"= plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"], geom.y_position[\"pml_top\"] gratingTE.plot_field(ax=ax[0]) gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\")",
"xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E = grating.solution[\"total\"] # E",
"0.12860 T = 0.04308 fmm = {} print(\"Transmission coefficient\") print(\" order ref calc\")",
"gy.dolfin.File(\"test.pvd\") << pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # #",
"1, \"pml_top\": lambda0, } ) ############################################################################## # Here we set the mesh refinement",
"Here we set the mesh refinement parameters, in order to be able to",
"the geometry using the :class:`~gyptis.Layered` # class: geom = gy.Layered(3, (dx, dy), thicknesses)",
"degree=degree) grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space)",
"###################################################################### # Results are in good agreement with the reference print(\"Transmission coefficient\") print(\"",
"R_hole) # pillar = geom.add_box(-l_pillar / 2, -l_pillar / 2, z0, l_pillar, l_pillar,",
"of the different layers are specified with an # ``OrderedDict`` object **from bottom",
"T = 0.04308 fmm = {} print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\")",
") ############################################################################## # Let's create the geometry using the :class:`~gyptis.Layered` # class: geom",
"gratingTE.plot_geometry(ax=ax[0]) ax[0].set_ylim(ylim) ax[0].set_axis_off() ax[0].set_title(\"$E_z$ (TM)\") gratingTM.plot_field(ax=ax[1]) gratingTM.plot_geometry(ax=ax[1]) ax[1].set_ylim(ylim) ax[1].set_axis_off() ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show()",
"geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole) # pillar = geom.add_box(-l_pillar / 2,",
"= geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup, hole, groove = geom.fragment([sub, sup, groove],",
"fmm = {} print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f}",
"units of lengths are in nanometers here, and we first define some #",
"order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TE'][0]:.4f} {effs_TM['T'][1]:.4f} \") print(f\" sum {T_ref['TE'][1]:.4f} {effs_TM['B']:.4f}",
"l_pillar, l_pillar, h) # geom.rotate(pillar, (0, 0, 0), (0, 0, 1), np.pi /",
"gy.pi / 4 eps_diel = 2.25 eps_layer = 0.8125 - 5.2500j ############################################################################## #",
"# geometrical and optical parameters: lambda0 = 0.5 dx = dy = 1",
"geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0 /",
"various domains # using a dictionary: epsilon = {d: 1 for d in",
"calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ######################################################################",
"we set the mesh refinement parameters, in order to be able to have",
"T = 0.12860 T = 0.17486 T = 0.12860 T = 0.06196 T",
"Structure is the same as in :cite:p:`Demesy2010`. # # The units of lengths",
"# E = grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) #",
"0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 * pmesh, } ) ############################################################################## #",
"= 0.06196 T = 0.12860 T = 0.17486 T = 0.12860 T =",
"= {} print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f}",
"grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] # E = grating.solution[\"periodic\"] # E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"]",
"1.0001]) T = 0.04308 T = 0.12860 T = 0.06196 T = 0.12860",
"switch to TE polarization gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve()",
"in nanometers here, and we first define some # geometrical and optical parameters:",
"= dy = 1 # 5 * lambda0 * 2 ** 0.5 /",
"0.5 / 2 R_hole = 0.25 hole = geom.add_cylinder(0, 0, z0, 0, 0,",
"in geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] = eps_diel epsilon[\"hole\"] = 1 epsilon[\"substrate\"]",
"pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp",
"1 # 5 * lambda0 * 2 ** 0.5 / 4 # periods",
"{} print(\"Transmission coefficient\") print(\" order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \")",
"of the simulation class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3,",
"- 5.2500j ############################################################################## # The thicknesses of the different layers are specified with",
"print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We switch to TE polarization gratingTM",
"epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\") << pp.real",
"0.04308 T = 0.12860 T = 0.06196 T = 0.12860 T = 0.17486",
"0, 1), np.pi / 4) groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup =",
"gy.dolfin.parameters[\"ghost_mode\"] = \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is the same as in :cite:p:`Demesy2010`.",
"E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # #",
"{T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We switch to",
"dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T = 0.04308 T = 0.12860 T = 0.06196",
"\"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 * pmesh, } ) ############################################################################## # Let's",
"source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ###################################################################### #",
"# ``OrderedDict`` object **from bottom to top**: thicknesses = OrderedDict( { \"pml_bottom\": lambda0,",
"have # ``parmesh`` cells per wavelength of the field inside each subdomain degree",
"effs_TM = gratingTM.diffraction_efficiencies(1, orders=True) H = gratingTM.solution[\"total\"] ###################################################################### # Let's visualize the fields",
"{ \"pml_bottom\": lambda0, \"substrate\": lambda0 / 1, \"groove\": 1 * h, \"superstrate\": lambda0",
"geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\") geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size",
"grating h = 0.05 theta0 = 0 phi0 = 0 psi0 = gy.pi",
"us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real #",
"/ 2 R_hole = 0.25 hole = geom.add_cylinder(0, 0, z0, 0, 0, h,",
"# periods of the grating h = 0.05 theta0 = 0 phi0 =",
"T = 0.06196 T = 0.12860 T = 0.17486 T = 0.12860 T",
"lambda0 = 0.5 dx = dy = 1 # 5 * lambda0 *",
"class: geom = gy.Layered(3, (dx, dy), thicknesses) z0 = geom.z_position[\"groove\"] # + h/10",
"= OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0 / 1, \"groove\": 1 * h,",
"pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor",
"pmesh_hole = pmesh * 1 mesh_param = dict( { \"pml_bottom\": 1 * pmesh",
"geom.build(interactive=1) ###################################################################### # Set the permittivity and permeabilities for the various domains #",
"0 phi0 = 0 psi0 = gy.pi / 4 eps_diel = 2.25 eps_layer",
"order ref calc\") print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f}",
"0.05 theta0 = 0 phi0 = 0 psi0 = gy.pi / 4 eps_diel",
"1.0001], TE=[0.8187, 1.0001]) T = 0.04308 T = 0.12860 T = 0.06196 T",
"/ 1, \"pml_top\": lambda0, } ) ############################################################################## # Here we set the mesh",
"per wavelength of the field inside each subdomain degree = 2 pmesh =",
"epsilon = {d: 1 for d in geom.domains} mu = {d: 1 for",
"refinement parameters, in order to be able to have # ``parmesh`` cells per",
"# Let's visualize the fields fig, ax = plt.subplots(1, 2) ylim = geom.y_position[\"substrate\"],",
"/ 4 eps_diel = 2.25 eps_layer = 0.8125 - 5.2500j ############################################################################## # The",
"create an instance of the simulation class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0,",
"# Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E = grating.formulation.phasor # pp = gy.utils.project_iterative(E,",
"### reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T = 0.04308 T =",
"2 pmesh = 3 pmesh_hole = pmesh * 1 mesh_param = dict( {",
"fig.tight_layout() fig.show() ###################################################################### # Results are in good agreement with the reference print(\"Transmission",
"import os os.system(\"paraview test.pvd\") xs ### reference T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001])",
"**from bottom to top**: thicknesses = OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0 /",
"= geom.add_box(-l_pillar / 2, -l_pillar / 2, z0, l_pillar, l_pillar, h) # geom.rotate(pillar,",
"\"pml_bottom\": lambda0, \"substrate\": lambda0 / 1, \"groove\": 1 * h, \"superstrate\": lambda0 /",
"param in mesh_param.items()} geom.set_mesh_size(mesh_size) # geom.remove_all_duplicates() geom.build(interactive=0) # geom.build(interactive=1) ###################################################################### # Set the",
"1 * pmesh * eps_diel ** 0.5, \"substrate\": pmesh * eps_diel ** 0.5,",
"field inside each subdomain degree = 2 pmesh = 3 pmesh_hole = pmesh",
"and we first define some # geometrical and optical parameters: lambda0 = 0.5",
"polarization gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM = gratingTM.diffraction_efficiencies(1,",
"# xsx grating.solve() effs = grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E = grating.solution[\"total\"] #",
"np.pi / 4) groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub,",
"# gy.dolfin.File(\"test.pvd\") << pp.real # us = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) #",
"print(\"--------------------------------\") print(f\" 0 {T_ref['TM'][0]:.4f} {effs_TE['T'][1]:.4f} \") print(f\" sum {T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### #",
"ax[1].set_title(\"$H_z$ (TE)\") fig.tight_layout() fig.show() ###################################################################### # Results are in good agreement with the",
"T_ref = dict(TM=[0.2070, 1.0001], TE=[0.8187, 1.0001]) T = 0.04308 T = 0.12860 T",
"pmesh, \"pml_top\": 1 * pmesh, } ) ############################################################################## # Let's create the geometry",
"psi0), dim=3, degree=degree) grating = gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp",
"# Results are in good agreement with the reference print(\"Transmission coefficient\") print(\" order",
"Example of a dielectric bi-periodic diffraction grating. \"\"\" # sphinx_gallery_thumbnail_number = 2 from",
"wavelength of the field inside each subdomain degree = 2 pmesh = 3",
"############################################################################## # The thicknesses of the different layers are specified with an #",
"= 0.25 hole = geom.add_cylinder(0, 0, z0, 0, 0, h, R_hole) # pillar",
"\"pml_top\": lambda0, } ) ############################################################################## # Here we set the mesh refinement parameters,",
"with an # ``OrderedDict`` object **from bottom to top**: thicknesses = OrderedDict( {",
"gy.dolfin.set_log_level(7) ############################################################################## # Structure is the same as in :cite:p:`Demesy2010`. # # The",
"3D Checkerboard Grating ======================= Example of a dielectric bi-periodic diffraction grating. \"\"\" #",
"0), (0, 0, 1), np.pi / 4) groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"]",
"= 0.12860 T = 0.06196 T = 0.12860 T = 0.17486 T =",
"OrderedDict( { \"pml_bottom\": lambda0, \"substrate\": lambda0 / 1, \"groove\": 1 * h, \"superstrate\":",
"pmesh * abs(eps_layer) ** 0.5, \"hole\": pmesh_hole, \"superstrate\": pmesh, \"pml_top\": 1 * pmesh,",
"phi0 = 0 psi0 = gy.pi / 4 eps_diel = 2.25 eps_layer =",
"= geom.layers[\"superstrate\"] sub, sup, hole, groove = geom.fragment([sub, sup, groove], hole) geom.add_physical(hole, \"hole\")",
":class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree) grating = gy.Grating(geom, epsilon,",
"# :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree) grating = gy.Grating(geom,",
"-*- coding: utf-8 -*- \"\"\" 3D Checkerboard Grating ======================= Example of a dielectric",
"grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] # pp = gy.utils.project_iterative(us,grating.formulation.real_function_space) # # gy.dolfin.File(\"test.pvd\") << pp.real # # import",
"plt import numpy as np import gyptis as gy gy.dolfin.parameters[\"form_compiler\"][\"quadrature_degree\"] = 5 #",
"= grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) # # E",
"= 0.12860 T = 0.04308 fmm = {} print(\"Transmission coefficient\") print(\" order ref",
"# E = grating.formulation.annex_field[\"as_subdomain\"][\"stack\"] pp = gy.utils.project_iterative(E, grating.formulation.real_function_space) # Vplot = gy.dolfin.FunctionSpace(geom.mesh,\"CG\",degree) #",
"to TE polarization gratingTM = gy.Grating(geom, epsilon, mu, source=pw, polarization=\"TE\", degree=2) gratingTM.solve() effs_TM",
"grating.diffraction_efficiencies(2, orders=True) print(effs) xssx E = grating.solution[\"total\"] # E = grating.solution[\"diffracted\"] # E",
"= gy.Grating(geom, epsilon, mu, source=pw, degree=degree, periodic_map_tol=1e-8) # pp = gy.utils.project_iterative(pw.expression,grating.formulation.real_function_space) # gy.dolfin.File(\"test.pvd\")",
") ############################################################################## # Here we set the mesh refinement parameters, in order to",
"{d: 1 for d in geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] = eps_diel",
"{T_ref['TM'][1]:.4f} {effs_TE['B']:.4f} \") ###################################################################### # We switch to TE polarization gratingTM = gy.Grating(geom,",
"############################################################################## # Let's create the geometry using the :class:`~gyptis.Layered` # class: geom =",
"class # :class:`~gyptis.Grating`, pw = gy.PlaneWave(lambda0, (theta0, phi0, psi0), dim=3, degree=degree) grating =",
"= \"shared_facet\" gy.dolfin.set_log_level(7) ############################################################################## # Structure is the same as in :cite:p:`Demesy2010`. #",
"= 0.5 dx = dy = 1 # 5 * lambda0 * 2",
"= geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup, hole, groove =",
"# Here we set the mesh refinement parameters, in order to be able",
"= {d: 1 for d in geom.domains} epsilon[\"groove\"] = eps_layer # epsilon[\"groove\"] =",
"geom.add_physical(groove, \"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0 / param for",
"/ 4) groove = geom.layers[\"groove\"] sub = geom.layers[\"substrate\"] sup = geom.layers[\"superstrate\"] sub, sup,",
"we first define some # geometrical and optical parameters: lambda0 = 0.5 dx",
"geom.rotate(pillar, (0, 0, 0), (0, 0, 1), np.pi / 4) groove = geom.layers[\"groove\"]",
"\"groove\") geom.add_physical(sub, \"substrate\") geom.add_physical(sup, \"superstrate\") mesh_size = {d: lambda0 / param for d,",
"lambda0, } ) ############################################################################## # Here we set the mesh refinement parameters, in"
] |
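As a quick sanity check on the solve above, the diffraction orders should satisfy energy conservation. The sketch below is not part of the original script: it assumes, following the prints above, that ``diffraction_efficiencies(..., orders=True)`` returns a dict whose ``'T'`` (and, by assumption, ``'R'``) entries hold per-order efficiencies and whose ``'B'`` entry is the total energy balance; the helper name ``check_balance`` is hypothetical.

import numpy as np

def check_balance(effs, tol=1e-2):
    """Check that the reported energy balance 'B' is close to unity."""
    t_sum = float(np.sum(effs["T"]))   # total transmitted power
    r_sum = float(np.sum(effs["R"]))   # total reflected power (assumed key)
    print(f"sum T = {t_sum:.4f}, sum R = {r_sum:.4f}, B = {effs['B']:.4f}")
    assert abs(effs["B"] - 1) < tol, "energy balance violated"

# check_balance(effs)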
<filename>pyteslaapi/__main__.py
#!/usr/bin/python
"""
Tesla CLI
"""
from client import TeslaApiClient
from exceptions import TeslaException
import logging

_LOGGER = logging.getLogger('pyteslaapi_cli')


def setup_logging(log_level=logging.INFO):
    """Set up the logging."""
    logging.basicConfig(level=log_level)
    fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
           "[%(name)s] %(message)s")
    colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
    datefmt = '%Y-%m-%d %H:%M:%S'
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('urllib3').setLevel(logging.WARNING)
    try:
        from colorlog import ColoredFormatter
        logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
            colorfmt,
            datefmt=datefmt,
            reset=True,
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'red',
            }
        ))
    except ImportError:
        pass
    logger = logging.getLogger('')
    logger.setLevel(log_level)


def call():
    """Execute command line helper."""
    log_level = logging.DEBUG
    setup_logging(log_level)
    try:
        api = TeslaApiClient("<EMAIL>", "<PASSWORD>")  # credentials redacted
        vehicles = api.vehicles
        for v in vehicles:
            if not v.wake_up():
                _LOGGER.error("Unable to wake up vehicle")
                continue
            v.update()
            _LOGGER.info(v.drive.attributes)
            _LOGGER.info(v.climate.attributes)
            _LOGGER.info(v.charge.attributes)
            _LOGGER.info(v.gui_settings.attributes)
            _LOGGER.info(v.locked)
    except TeslaException as exc:
        _LOGGER.error(exc.message)


def main():
    """Execute from command line."""
    call()


if __name__ == '__main__':
    main()
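The original script hardcoded account credentials (redacted above). Below is a minimal sketch of a safer alternative that reads them from the environment; the variable names TESLA_EMAIL and TESLA_PASSWORD are hypothetical, and the TeslaApiClient(email, password) signature is taken from the call above.

import os

def make_client():
    # Hypothetical environment variable names; export them before running.
    email = os.environ["TESLA_EMAIL"]
    password = os.environ["TESLA_PASSWORD"]
    return TeslaApiClient(email, password)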
[
"False self._points = 0.0 self._progress = 0.0 self._bucholz = 0.0 self._achieved_rank = 0",
"f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\"",
"name and self.get_surname() == surname: _exist = True else: _exist = False return",
"self._name = name return self def set_surname(self, surname): self._surname = surname return self",
"\"\" self._surname = \"\" self._sex = \"\" self._birth_date: date self._city = \"\" self._category",
"\"II+\": 1900, \"II\": 1800, \"III\": 1600, \"IV\": 1400, \"V\": 1200, \"bk\": 1000, \"wc\":",
"else: _exist = False return _exist def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name}",
"2100, \"k\": 2000, \"I++\": 1900, \"I+\": 1900, \"I\": 1800, \"II+\": 1700, \"II\": 1600,",
"package imports: from datetime import date # Local package imports: CATEGORY = {",
"from datetime import date # Local package imports: CATEGORY = { \"male\":{ \"m\":",
"1400, \"V\": 1200, \"bk\": 1000, \"wc\": 1000 }, \"female\":{ \"m\": 2200, \"iwm\": 2200,",
"1800, \"III\": 1600, \"IV\": 1400, \"V\": 1200, \"bk\": 1000, \"wc\": 1000 }, \"female\":{",
"0.0 self._bucholz = 0.0 self._achieved_rank = 0 self._last_played_white = False self._rounds = None",
"= birthdate return self def set_city(self, city): self._city = city return self def",
"to Player, which can b set at the begining and during the game.",
"self._city = city return self def set_category(self, category): self._category = category return self",
"data: self._name = \"\" self._surname = \"\" self._sex = \"\" self._birth_date: date self._city",
"= surname return self def set_sex(self, sex): self._sex = sex return self def",
"category): self._category = category return self def set_elo(self, elo): self._elo = elo return",
"self._birth_date: date self._city = \"\" self._category = \"bk\" self._elo = 0 self._rank =",
"1900, \"II\": 1800, \"III\": 1600, \"IV\": 1400, \"V\": 1200, \"bk\": 1000, \"wc\": 1000",
"CATEGORY[self._sex][self._category] return self def exist(self, name, surname): _exist = False if self.get_name() ==",
"{self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump += f\"Elo rating:",
"= \"\" # Dynamic Player data: self._place = 0 self._idnt = 0 self._paused",
"self def set_sex(self, sex): self._sex = sex return self def set_birthdate(self, birthdate): self._birth_date",
"+= f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump +=",
"def set_elo(self, elo): self._elo = elo return self def set_club(self, club): self._club =",
"= 0.0 self._achieved_rank = 0 self._last_played_white = False self._rounds = None self._oponents =",
"__repr__(self): ret = f'\\n#({self._idnt})' ret += self.dump() return ret def set_name(self, name): self._name",
"= \"\" self._category = \"bk\" self._elo = 0 self._rank = 1000 self._club =",
"set_name(self, name): self._name = name return self def set_surname(self, surname): self._surname = surname",
"sex): self._sex = sex return self def set_birthdate(self, birthdate): self._birth_date = birthdate return",
"and self.get_surname() == surname: _exist = True else: _exist = False return _exist",
"import date # Local package imports: CATEGORY = { \"male\":{ \"m\": 2400, \"im\":",
"return club def get_name(self): return self._name def get_surname(self): return self._surname def get_by_ident(self, ident):",
"self._birth_date = birthdate return self def set_city(self, city): self._city = city return self",
"False self._rounds = None self._oponents = list() self._results = list() self._set = False",
"surname): _exist = False if self.get_name() == name and self.get_surname() == surname: _exist",
"_exist = True else: _exist = False return _exist def dump(self): _dump =",
"date # Local package imports: CATEGORY = { \"male\":{ \"m\": 2400, \"im\": 2400,",
"For setting round flag self._round_done = False def __repr__(self): ret = f'\\n#({self._idnt})' ret",
"which can b set at the begining and during the game. \"\"\" #",
"2100, \"I\": 2000, \"II+\": 1900, \"II\": 1800, \"III\": 1600, \"IV\": 1400, \"V\": 1200,",
"self def set_birthdate(self, birthdate): self._birth_date = birthdate return self def set_city(self, city): self._city",
"{ \"male\":{ \"m\": 2400, \"im\": 2400, \"k++\": 2300, \"k+\": 2300, \"k\": 2200, \"I++\":",
"set_birthdate(self, birthdate): self._birth_date = birthdate return self def set_city(self, city): self._city = city",
"return self def set_elo(self, elo): self._elo = elo return self def set_club(self, club):",
"}, \"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\": 2100, \"k+\": 2100, \"k\": 2000, \"I++\":",
"return self def set_sex(self, sex): self._sex = sex return self def set_birthdate(self, birthdate):",
"birthdate return self def set_city(self, city): self._city = city return self def set_category(self,",
"{self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump += f\"Category:",
"2400, \"im\": 2400, \"k++\": 2300, \"k+\": 2300, \"k\": 2200, \"I++\": 2100, \"I+\": 2100,",
"= f'\\n#({self._idnt})' ret += self.dump() return ret def set_name(self, name): self._name = name",
"category return self def set_elo(self, elo): self._elo = elo return self def set_club(self,",
"2300, \"k\": 2200, \"I++\": 2100, \"I+\": 2100, \"I\": 2000, \"II+\": 1900, \"II\": 1800,",
"the game. \"\"\" # Global package imports: from datetime import date # Local",
"2000, \"II+\": 1900, \"II\": 1800, \"III\": 1600, \"IV\": 1400, \"V\": 1200, \"bk\": 1000,",
"+= f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\" _dump",
"\"III\": 1400, \"IV\": 1250, \"V\": 1100, \"bk\": 1000, \"wc\": 1000 } } class",
"1000, \"wc\": 1000 } } class Player(object): def __init__(self) -> None: # Static",
"0.0 self._achieved_rank = 0 self._last_played_white = False self._rounds = None self._oponents = list()",
"# Global package imports: from datetime import date # Local package imports: CATEGORY",
"sex return self def set_birthdate(self, birthdate): self._birth_date = birthdate return self def set_city(self,",
"Chess player class. All data related to Player, which can b set at",
"self._bucholz = 0.0 self._achieved_rank = 0 self._last_played_white = False self._rounds = None self._oponents",
"\"bk\": 1000, \"wc\": 1000 }, \"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\": 2100, \"k+\":",
"\"II\": 1600, \"III\": 1400, \"IV\": 1250, \"V\": 1100, \"bk\": 1000, \"wc\": 1000 }",
"name return self def set_surname(self, surname): self._surname = surname return self def set_sex(self,",
"_dump += f\"Category: {self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\" _dump += f\"Turnament rating:",
"{self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\" _dump +=",
"dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" # _dump",
"self._surname def get_by_ident(self, ident): if ident == self._idnt: return f'{self._surname} {self._name}' else: return",
"False def __repr__(self): ret = f'\\n#({self._idnt})' ret += self.dump() return ret def set_name(self,",
"ident == self._idnt: return f'{self._surname} {self._name}' else: return None def calculate_rank(self): if self._elo",
"= 0 self._last_played_white = False self._rounds = None self._oponents = list() self._results =",
"Player, which can b set at the begining and during the game. \"\"\"",
"else: self._rank = CATEGORY[self._sex][self._category] return self def exist(self, name, surname): _exist = False",
"ret += self.dump() return ret def set_name(self, name): self._name = name return self",
"self._rank = 1000 self._club = \"\" # Dynamic Player data: self._place = 0",
"set_sex(self, sex): self._sex = sex return self def set_birthdate(self, birthdate): self._birth_date = birthdate",
"== name and self.get_surname() == surname: _exist = True else: _exist = False",
"self._sex = sex return self def set_birthdate(self, birthdate): self._birth_date = birthdate return self",
"0.0 self._progress = 0.0 self._bucholz = 0.0 self._achieved_rank = 0 self._last_played_white = False",
"self.get_name() == name and self.get_surname() == surname: _exist = True else: _exist =",
"if self._elo > 0: self._rank = self._elo else: self._rank = CATEGORY[self._sex][self._category] return self",
"= category return self def set_elo(self, elo): self._elo = elo return self def",
"self._elo > 0: self._rank = self._elo else: self._rank = CATEGORY[self._sex][self._category] return self def",
"\"II\": 1800, \"III\": 1600, \"IV\": 1400, \"V\": 1200, \"bk\": 1000, \"wc\": 1000 },",
"_dump += f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\"",
"def exist(self, name, surname): _exist = False if self.get_name() == name and self.get_surname()",
"\"I+\": 1900, \"I\": 1800, \"II+\": 1700, \"II\": 1600, \"III\": 1400, \"IV\": 1250, \"V\":",
"1000, \"wc\": 1000 }, \"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\": 2100, \"k+\": 2100,",
"= club return club def get_name(self): return self._name def get_surname(self): return self._surname def",
"_exist = False return _exist def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\"",
"1250, \"V\": 1100, \"bk\": 1000, \"wc\": 1000 } } class Player(object): def __init__(self)",
"\"V\": 1100, \"bk\": 1000, \"wc\": 1000 } } class Player(object): def __init__(self) ->",
"set_club(self, club): self._club = club return club def get_name(self): return self._name def get_surname(self):",
"= False self._points = 0.0 self._progress = 0.0 self._bucholz = 0.0 self._achieved_rank =",
"\"I++\": 1900, \"I+\": 1900, \"I\": 1800, \"II+\": 1700, \"II\": 1600, \"III\": 1400, \"IV\":",
"1900, \"I+\": 1900, \"I\": 1800, \"II+\": 1700, \"II\": 1600, \"III\": 1400, \"IV\": 1250,",
"self._club = \"\" # Dynamic Player data: self._place = 0 self._idnt = 0",
"} } class Player(object): def __init__(self) -> None: # Static Player data: self._name",
"+= f\"Sex: {self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump",
"2100, \"I+\": 2100, \"I\": 2000, \"II+\": 1900, \"II\": 1800, \"III\": 1600, \"IV\": 1400,",
"self.dump() return ret def set_name(self, name): self._name = name return self def set_surname(self,",
"birthdate): self._birth_date = birthdate return self def set_city(self, city): self._city = city return",
"\"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\": 2100, \"k+\": 2100, \"k\": 2000, \"I++\": 1900,",
"CATEGORY = { \"male\":{ \"m\": 2400, \"im\": 2400, \"k++\": 2300, \"k+\": 2300, \"k\":",
"def set_name(self, name): self._name = name return self def set_surname(self, surname): self._surname =",
"= True else: _exist = False return _exist def dump(self): _dump = f\"\\nPLAYER",
"data: self._place = 0 self._idnt = 0 self._paused = False self._points = 0.0",
"surname): self._surname = surname return self def set_sex(self, sex): self._sex = sex return",
"can b set at the begining and during the game. \"\"\" # Global",
"= f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" # _dump += f\"Birth:",
"= list() self._results = list() self._set = False # For setting round flag",
"during the game. \"\"\" # Global package imports: from datetime import date #",
"def set_club(self, club): self._club = club return club def get_name(self): return self._name def",
"set_elo(self, elo): self._elo = elo return self def set_club(self, club): self._club = club",
"= False if self.get_name() == name and self.get_surname() == surname: _exist = True",
"set at the begining and during the game. \"\"\" # Global package imports:",
"\"I++\": 2100, \"I+\": 2100, \"I\": 2000, \"II+\": 1900, \"II\": 1800, \"III\": 1600, \"IV\":",
"club): self._club = club return club def get_name(self): return self._name def get_surname(self): return",
"elo return self def set_club(self, club): self._club = club return club def get_name(self):",
"= None self._oponents = list() self._results = list() self._set = False # For",
"else: return None def calculate_rank(self): if self._elo > 0: self._rank = self._elo else:",
"return self def set_birthdate(self, birthdate): self._birth_date = birthdate return self def set_city(self, city):",
"\"k++\": 2100, \"k+\": 2100, \"k\": 2000, \"I++\": 1900, \"I+\": 1900, \"I\": 1800, \"II+\":",
"= 0 self._idnt = 0 self._paused = False self._points = 0.0 self._progress =",
"1000 }, \"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\": 2100, \"k+\": 2100, \"k\": 2000,",
"True else: _exist = False return _exist def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}):",
"self._rounds = None self._oponents = list() self._results = list() self._set = False #",
"self def exist(self, name, surname): _exist = False if self.get_name() == name and",
"ret def set_name(self, name): self._name = name return self def set_surname(self, surname): self._surname",
"0 self._paused = False self._points = 0.0 self._progress = 0.0 self._bucholz = 0.0",
"= 0 self._rank = 1000 self._club = \"\" # Dynamic Player data: self._place",
"self def set_category(self, category): self._category = category return self def set_elo(self, elo): self._elo",
"ident): if ident == self._idnt: return f'{self._surname} {self._name}' else: return None def calculate_rank(self):",
"self._progress = 0.0 self._bucholz = 0.0 self._achieved_rank = 0 self._last_played_white = False self._rounds",
"set_surname(self, surname): self._surname = surname return self def set_sex(self, sex): self._sex = sex",
"name): self._name = name return self def set_surname(self, surname): self._surname = surname return",
"def set_birthdate(self, birthdate): self._birth_date = birthdate return self def set_city(self, city): self._city =",
"= \"\" self._birth_date: date self._city = \"\" self._category = \"bk\" self._elo = 0",
"return f'{self._surname} {self._name}' else: return None def calculate_rank(self): if self._elo > 0: self._rank",
"flag self._round_done = False def __repr__(self): ret = f'\\n#({self._idnt})' ret += self.dump() return",
"# _dump += f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\"",
"= sex return self def set_birthdate(self, birthdate): self._birth_date = birthdate return self def",
"return None def calculate_rank(self): if self._elo > 0: self._rank = self._elo else: self._rank",
"and during the game. \"\"\" # Global package imports: from datetime import date",
"get_by_ident(self, ident): if ident == self._idnt: return f'{self._surname} {self._name}' else: return None def",
"class Player(object): def __init__(self) -> None: # Static Player data: self._name = \"\"",
"get_name(self): return self._name def get_surname(self): return self._surname def get_by_ident(self, ident): if ident ==",
"self._results = list() self._set = False # For setting round flag self._round_done =",
"_dump += f\"Sex: {self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\"",
"def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" #",
"city): self._city = city return self def set_category(self, category): self._category = category return",
"\"V\": 1200, \"bk\": 1000, \"wc\": 1000 }, \"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\":",
"\"im\": 2400, \"k++\": 2300, \"k+\": 2300, \"k\": 2200, \"I++\": 2100, \"I+\": 2100, \"I\":",
"\"\" # Dynamic Player data: self._place = 0 self._idnt = 0 self._paused =",
"_exist def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\"",
"self._achieved_rank = 0 self._last_played_white = False self._rounds = None self._oponents = list() self._results",
"return self def set_category(self, category): self._category = category return self def set_elo(self, elo):",
"self._rank = CATEGORY[self._sex][self._category] return self def exist(self, name, surname): _exist = False if",
"False # For setting round flag self._round_done = False def __repr__(self): ret =",
"= CATEGORY[self._sex][self._category] return self def exist(self, name, surname): _exist = False if self.get_name()",
"the begining and during the game. \"\"\" # Global package imports: from datetime",
"if self.get_name() == name and self.get_surname() == surname: _exist = True else: _exist",
"== self._idnt: return f'{self._surname} {self._name}' else: return None def calculate_rank(self): if self._elo >",
"1000 } } class Player(object): def __init__(self) -> None: # Static Player data:",
"\"k+\": 2300, \"k\": 2200, \"I++\": 2100, \"I+\": 2100, \"I\": 2000, \"II+\": 1900, \"II\":",
"return _exist def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex:",
"self._name = \"\" self._surname = \"\" self._sex = \"\" self._birth_date: date self._city =",
"player class. All data related to Player, which can b set at the",
"return self._name def get_surname(self): return self._surname def get_by_ident(self, ident): if ident == self._idnt:",
"surname: _exist = True else: _exist = False return _exist def dump(self): _dump",
"def get_surname(self): return self._surname def get_by_ident(self, ident): if ident == self._idnt: return f'{self._surname}",
"= 0 self._paused = False self._points = 0.0 self._progress = 0.0 self._bucholz =",
"Player(object): def __init__(self) -> None: # Static Player data: self._name = \"\" self._surname",
"class. All data related to Player, which can b set at the begining",
"= self._elo else: self._rank = CATEGORY[self._sex][self._category] return self def exist(self, name, surname): _exist",
"set_city(self, city): self._city = city return self def set_category(self, category): self._category = category",
"False if self.get_name() == name and self.get_surname() == surname: _exist = True else:",
"self._category = category return self def set_elo(self, elo): self._elo = elo return self",
"def get_by_ident(self, ident): if ident == self._idnt: return f'{self._surname} {self._name}' else: return None",
"1900, \"I\": 1800, \"II+\": 1700, \"II\": 1600, \"III\": 1400, \"IV\": 1250, \"V\": 1100,",
"1100, \"bk\": 1000, \"wc\": 1000 } } class Player(object): def __init__(self) -> None:",
"f'{self._surname} {self._name}' else: return None def calculate_rank(self): if self._elo > 0: self._rank =",
"self._name def get_surname(self): return self._surname def get_by_ident(self, ident): if ident == self._idnt: return",
"data related to Player, which can b set at the begining and during",
"self._city = \"\" self._category = \"bk\" self._elo = 0 self._rank = 1000 self._club",
"Player data: self._place = 0 self._idnt = 0 self._paused = False self._points =",
"\"k\": 2200, \"I++\": 2100, \"I+\": 2100, \"I\": 2000, \"II+\": 1900, \"II\": 1800, \"III\":",
"def __repr__(self): ret = f'\\n#({self._idnt})' ret += self.dump() return ret def set_name(self, name):",
"set_category(self, category): self._category = category return self def set_elo(self, elo): self._elo = elo",
"0: self._rank = self._elo else: self._rank = CATEGORY[self._sex][self._category] return self def exist(self, name,",
"\"bk\" self._elo = 0 self._rank = 1000 self._club = \"\" # Dynamic Player",
"__init__(self) -> None: # Static Player data: self._name = \"\" self._surname = \"\"",
"None self._oponents = list() self._results = list() self._set = False # For setting",
"self._round_done = False def __repr__(self): ret = f'\\n#({self._idnt})' ret += self.dump() return ret",
"\"m\": 2400, \"im\": 2400, \"k++\": 2300, \"k+\": 2300, \"k\": 2200, \"I++\": 2100, \"I+\":",
"= False self._rounds = None self._oponents = list() self._results = list() self._set =",
"\"I+\": 2100, \"I\": 2000, \"II+\": 1900, \"II\": 1800, \"III\": 1600, \"IV\": 1400, \"V\":",
"# For setting round flag self._round_done = False def __repr__(self): ret = f'\\n#({self._idnt})'",
"+= self.dump() return ret def set_name(self, name): self._name = name return self def",
"ret = f'\\n#({self._idnt})' ret += self.dump() return ret def set_name(self, name): self._name =",
"= \"\" self._sex = \"\" self._birth_date: date self._city = \"\" self._category = \"bk\"",
"if ident == self._idnt: return f'{self._surname} {self._name}' else: return None def calculate_rank(self): if",
"\"\" self._birth_date: date self._city = \"\" self._category = \"bk\" self._elo = 0 self._rank",
"return self def exist(self, name, surname): _exist = False if self.get_name() == name",
"self._elo else: self._rank = CATEGORY[self._sex][self._category] return self def exist(self, name, surname): _exist =",
"f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\" _dump +=",
"self._oponents = list() self._results = list() self._set = False # For setting round",
"= False def __repr__(self): ret = f'\\n#({self._idnt})' ret += self.dump() return ret def",
"get_surname(self): return self._surname def get_by_ident(self, ident): if ident == self._idnt: return f'{self._surname} {self._name}'",
"related to Player, which can b set at the begining and during the",
"club def get_name(self): return self._name def get_surname(self): return self._surname def get_by_ident(self, ident): if",
"elo): self._elo = elo return self def set_club(self, club): self._club = club return",
"self.get_surname() == surname: _exist = True else: _exist = False return _exist def",
"b set at the begining and during the game. \"\"\" # Global package",
"> 0: self._rank = self._elo else: self._rank = CATEGORY[self._sex][self._category] return self def exist(self,",
"imports: from datetime import date # Local package imports: CATEGORY = { \"male\":{",
"game. \"\"\" # Global package imports: from datetime import date # Local package",
"round flag self._round_done = False def __repr__(self): ret = f'\\n#({self._idnt})' ret += self.dump()",
"1200, \"bk\": 1000, \"wc\": 1000 }, \"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\": 2100,",
"self._club = club return club def get_name(self): return self._name def get_surname(self): return self._surname",
"def calculate_rank(self): if self._elo > 0: self._rank = self._elo else: self._rank = CATEGORY[self._sex][self._category]",
"{self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\" _dump += f\"Turnament rating: {self._rank}\\n\" return _dump",
"# Local package imports: CATEGORY = { \"male\":{ \"m\": 2400, \"im\": 2400, \"k++\":",
"Player data: self._name = \"\" self._surname = \"\" self._sex = \"\" self._birth_date: date",
"\"k\": 2000, \"I++\": 1900, \"I+\": 1900, \"I\": 1800, \"II+\": 1700, \"II\": 1600, \"III\":",
"self._surname = surname return self def set_sex(self, sex): self._sex = sex return self",
"\"m\": 2200, \"iwm\": 2200, \"k++\": 2100, \"k+\": 2100, \"k\": 2000, \"I++\": 1900, \"I+\":",
"return ret def set_name(self, name): self._name = name return self def set_surname(self, surname):",
"1800, \"II+\": 1700, \"II\": 1600, \"III\": 1400, \"IV\": 1250, \"V\": 1100, \"bk\": 1000,",
"return self def set_surname(self, surname): self._surname = surname return self def set_sex(self, sex):",
"{self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\" _dump += f\"Turnament",
"1700, \"II\": 1600, \"III\": 1400, \"IV\": 1250, \"V\": 1100, \"bk\": 1000, \"wc\": 1000",
"\"IV\": 1250, \"V\": 1100, \"bk\": 1000, \"wc\": 1000 } } class Player(object): def",
"self._idnt = 0 self._paused = False self._points = 0.0 self._progress = 0.0 self._bucholz",
"date self._city = \"\" self._category = \"bk\" self._elo = 0 self._rank = 1000",
"_dump += f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump",
"list() self._results = list() self._set = False # For setting round flag self._round_done",
"def get_name(self): return self._name def get_surname(self): return self._surname def get_by_ident(self, ident): if ident",
"1400, \"IV\": 1250, \"V\": 1100, \"bk\": 1000, \"wc\": 1000 } } class Player(object):",
"def set_city(self, city): self._city = city return self def set_category(self, category): self._category =",
"self._sex = \"\" self._birth_date: date self._city = \"\" self._category = \"bk\" self._elo =",
"self._set = False # For setting round flag self._round_done = False def __repr__(self):",
"\"iwm\": 2200, \"k++\": 2100, \"k+\": 2100, \"k\": 2000, \"I++\": 1900, \"I+\": 1900, \"I\":",
"= name return self def set_surname(self, surname): self._surname = surname return self def",
"self._category = \"bk\" self._elo = 0 self._rank = 1000 self._club = \"\" #",
"def set_surname(self, surname): self._surname = surname return self def set_sex(self, sex): self._sex =",
"= { \"male\":{ \"m\": 2400, \"im\": 2400, \"k++\": 2300, \"k+\": 2300, \"k\": 2200,",
"1600, \"IV\": 1400, \"V\": 1200, \"bk\": 1000, \"wc\": 1000 }, \"female\":{ \"m\": 2200,",
"self def set_elo(self, elo): self._elo = elo return self def set_club(self, club): self._club",
"\"I\": 2000, \"II+\": 1900, \"II\": 1800, \"III\": 1600, \"IV\": 1400, \"V\": 1200, \"bk\":",
"= \"\" self._surname = \"\" self._sex = \"\" self._birth_date: date self._city = \"\"",
"def __init__(self) -> None: # Static Player data: self._name = \"\" self._surname =",
"self._points = 0.0 self._progress = 0.0 self._bucholz = 0.0 self._achieved_rank = 0 self._last_played_white",
"} class Player(object): def __init__(self) -> None: # Static Player data: self._name =",
"\"I\": 1800, \"II+\": 1700, \"II\": 1600, \"III\": 1400, \"IV\": 1250, \"V\": 1100, \"bk\":",
"1600, \"III\": 1400, \"IV\": 1250, \"V\": 1100, \"bk\": 1000, \"wc\": 1000 } }",
"2400, \"k++\": 2300, \"k+\": 2300, \"k\": 2200, \"I++\": 2100, \"I+\": 2100, \"I\": 2000,",
"self._idnt: return f'{self._surname} {self._name}' else: return None def calculate_rank(self): if self._elo > 0:",
"= False return _exist def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump",
"begining and during the game. \"\"\" # Global package imports: from datetime import",
"\"III\": 1600, \"IV\": 1400, \"V\": 1200, \"bk\": 1000, \"wc\": 1000 }, \"female\":{ \"m\":",
"\"\" self._sex = \"\" self._birth_date: date self._city = \"\" self._category = \"bk\" self._elo",
"self def set_city(self, city): self._city = city return self def set_category(self, category): self._category",
"\"\"\"player.py Chess player class. All data related to Player, which can b set",
"= 0.0 self._bucholz = 0.0 self._achieved_rank = 0 self._last_played_white = False self._rounds =",
"calculate_rank(self): if self._elo > 0: self._rank = self._elo else: self._rank = CATEGORY[self._sex][self._category] return",
"surname return self def set_sex(self, sex): self._sex = sex return self def set_birthdate(self,",
"at the begining and during the game. \"\"\" # Global package imports: from",
"None: # Static Player data: self._name = \"\" self._surname = \"\" self._sex =",
"2200, \"iwm\": 2200, \"k++\": 2100, \"k+\": 2100, \"k\": 2000, \"I++\": 1900, \"I+\": 1900,",
"self._last_played_white = False self._rounds = None self._oponents = list() self._results = list() self._set",
"\"\" self._category = \"bk\" self._elo = 0 self._rank = 1000 self._club = \"\"",
"\"wc\": 1000 } } class Player(object): def __init__(self) -> None: # Static Player",
"return self def set_city(self, city): self._city = city return self def set_category(self, category):",
"self def set_surname(self, surname): self._surname = surname return self def set_sex(self, sex): self._sex",
"None def calculate_rank(self): if self._elo > 0: self._rank = self._elo else: self._rank =",
"f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump += f\"Category: {self._category}\\n\" _dump += f\"Elo",
"1000 self._club = \"\" # Dynamic Player data: self._place = 0 self._idnt =",
"\"IV\": 1400, \"V\": 1200, \"bk\": 1000, \"wc\": 1000 }, \"female\":{ \"m\": 2200, \"iwm\":",
"datetime import date # Local package imports: CATEGORY = { \"male\":{ \"m\": 2400,",
"0 self._idnt = 0 self._paused = False self._points = 0.0 self._progress = 0.0",
"return self def set_club(self, club): self._club = club return club def get_name(self): return",
"\"II+\": 1700, \"II\": 1600, \"III\": 1400, \"IV\": 1250, \"V\": 1100, \"bk\": 1000, \"wc\":",
"def set_category(self, category): self._category = category return self def set_elo(self, elo): self._elo =",
"package imports: CATEGORY = { \"male\":{ \"m\": 2400, \"im\": 2400, \"k++\": 2300, \"k+\":",
"= \"bk\" self._elo = 0 self._rank = 1000 self._club = \"\" # Dynamic",
"2200, \"I++\": 2100, \"I+\": 2100, \"I\": 2000, \"II+\": 1900, \"II\": 1800, \"III\": 1600,",
"0 self._last_played_white = False self._rounds = None self._oponents = list() self._results = list()",
"0 self._rank = 1000 self._club = \"\" # Dynamic Player data: self._place =",
"False return _exist def dump(self): _dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump +=",
"f\"Sex: {self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\" _dump += f\"City: {self._city}\\n\" _dump +=",
"self._paused = False self._points = 0.0 self._progress = 0.0 self._bucholz = 0.0 self._achieved_rank",
"\"wc\": 1000 }, \"female\":{ \"m\": 2200, \"iwm\": 2200, \"k++\": 2100, \"k+\": 2100, \"k\":",
"_dump = f\"\\nPLAYER (#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" # _dump +=",
"+= f\"Category: {self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\" _dump += f\"Turnament rating: {self._rank}\\n\"",
"= list() self._set = False # For setting round flag self._round_done = False",
"(#{self._idnt}): {self._name} {self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\" _dump",
"city return self def set_category(self, category): self._category = category return self def set_elo(self,",
"= False # For setting round flag self._round_done = False def __repr__(self): ret",
"\"bk\": 1000, \"wc\": 1000 } } class Player(object): def __init__(self) -> None: #",
"# Static Player data: self._name = \"\" self._surname = \"\" self._sex = \"\"",
"= city return self def set_category(self, category): self._category = category return self def",
"Global package imports: from datetime import date # Local package imports: CATEGORY =",
"f\"Category: {self._category}\\n\" _dump += f\"Elo rating: {self._elo}\\n\" _dump += f\"Turnament rating: {self._rank}\\n\" return",
"list() self._set = False # For setting round flag self._round_done = False def",
"{self._name}' else: return None def calculate_rank(self): if self._elo > 0: self._rank = self._elo",
"exist(self, name, surname): _exist = False if self.get_name() == name and self.get_surname() ==",
"\"male\":{ \"m\": 2400, \"im\": 2400, \"k++\": 2300, \"k+\": 2300, \"k\": 2200, \"I++\": 2100,",
"self._elo = elo return self def set_club(self, club): self._club = club return club",
"name, surname): _exist = False if self.get_name() == name and self.get_surname() == surname:",
"f'\\n#({self._idnt})' ret += self.dump() return ret def set_name(self, name): self._name = name return",
"2200, \"k++\": 2100, \"k+\": 2100, \"k\": 2000, \"I++\": 1900, \"I+\": 1900, \"I\": 1800,",
"= 1000 self._club = \"\" # Dynamic Player data: self._place = 0 self._idnt",
"-> None: # Static Player data: self._name = \"\" self._surname = \"\" self._sex",
"All data related to Player, which can b set at the begining and",
"setting round flag self._round_done = False def __repr__(self): ret = f'\\n#({self._idnt})' ret +=",
"== surname: _exist = True else: _exist = False return _exist def dump(self):",
"Dynamic Player data: self._place = 0 self._idnt = 0 self._paused = False self._points",
"imports: CATEGORY = { \"male\":{ \"m\": 2400, \"im\": 2400, \"k++\": 2300, \"k+\": 2300,",
"Static Player data: self._name = \"\" self._surname = \"\" self._sex = \"\" self._birth_date:",
"self._rank = self._elo else: self._rank = CATEGORY[self._sex][self._category] return self def exist(self, name, surname):",
"= elo return self def set_club(self, club): self._club = club return club def",
"2000, \"I++\": 1900, \"I+\": 1900, \"I\": 1800, \"II+\": 1700, \"II\": 1600, \"III\": 1400,",
"self._elo = 0 self._rank = 1000 self._club = \"\" # Dynamic Player data:",
"self._place = 0 self._idnt = 0 self._paused = False self._points = 0.0 self._progress",
"def set_sex(self, sex): self._sex = sex return self def set_birthdate(self, birthdate): self._birth_date =",
"\"k+\": 2100, \"k\": 2000, \"I++\": 1900, \"I+\": 1900, \"I\": 1800, \"II+\": 1700, \"II\":",
"self._surname = \"\" self._sex = \"\" self._birth_date: date self._city = \"\" self._category =",
"= 0.0 self._progress = 0.0 self._bucholz = 0.0 self._achieved_rank = 0 self._last_played_white =",
"2100, \"k+\": 2100, \"k\": 2000, \"I++\": 1900, \"I+\": 1900, \"I\": 1800, \"II+\": 1700,",
"# Dynamic Player data: self._place = 0 self._idnt = 0 self._paused = False",
"\"\"\" # Global package imports: from datetime import date # Local package imports:",
"2300, \"k+\": 2300, \"k\": 2200, \"I++\": 2100, \"I+\": 2100, \"I\": 2000, \"II+\": 1900,",
"self def set_club(self, club): self._club = club return club def get_name(self): return self._name",
"_exist = False if self.get_name() == name and self.get_surname() == surname: _exist =",
"\"k++\": 2300, \"k+\": 2300, \"k\": 2200, \"I++\": 2100, \"I+\": 2100, \"I\": 2000, \"II+\":",
"{self._surname}\\n\" _dump += f\"Sex: {self._sex}\\n\" # _dump += f\"Birth: {self._birth_date}\\n\" _dump += f\"City:",
"return self._surname def get_by_ident(self, ident): if ident == self._idnt: return f'{self._surname} {self._name}' else:",
"club return club def get_name(self): return self._name def get_surname(self): return self._surname def get_by_ident(self,",
"Local package imports: CATEGORY = { \"male\":{ \"m\": 2400, \"im\": 2400, \"k++\": 2300,"
] |
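A minimal usage sketch for the Player model above. The player data is invented; the chain relies on every setter returning self, as in the code.

# Hypothetical example (invented data): build a player and derive a
# tournament rating from the category table when no Elo rating is set.
p = (Player()
     .set_name("Anna")
     .set_sex("female")
     .set_city("Warsaw")
     .set_category("II")
     .set_elo(0)              # Elo unknown: rank falls back to CATEGORY["female"]["II"]
     .calculate_rank())
print(p.dump())               # includes "Tournament rating: 1600"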
<filename>chainer/utils/argument.py
def check_unexpected_kwargs(kwargs, **unexpected):
    for key, message in unexpected.items():
        if key in kwargs:
            raise ValueError(message)


def parse_kwargs(kwargs, *name_and_values):
    values = [kwargs.pop(name, default_value)
              for name, default_value in name_and_values]
    if kwargs:
        args = ', '.join(["'%s'" % arg for arg in kwargs.keys()])
        raise TypeError('got unexpected keyword argument(s) %s' % args)
    return tuple(values)


def assert_kwargs_empty(kwargs):
    # It only checks if kwargs is empty.
    parse_kwargs(kwargs)
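A short usage sketch for these helpers; scale() and its keyword names are illustrative, not part of the module.

# Hypothetical wrapper (not part of chainer): accept only 'factor' and
# 'offset', and reject a removed 'train' keyword with a targeted message.
def scale(x, **kwargs):
    check_unexpected_kwargs(
        kwargs, train='train argument is not supported anymore')
    factor, offset = parse_kwargs(kwargs, ('factor', 1.0), ('offset', 0.0))
    assert_kwargs_empty(kwargs)  # everything should have been consumed
    return x * factor + offset

scale(2.0, factor=3.0)    # -> 6.0
# scale(2.0, gain=3.0)    # raises TypeError: got unexpected keyword argument(s) 'gain'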
from math import sqrt
from math import factorial
from operator import *
from numpy import *

### Defining the DeltaJ function that will be used in Wigner6J
def DeltaJ(a, b, c):
    Total = 0
    while True:
        if (a+b-c) < 0:
            break
        elif (a-b+c) < 0:
            break
        elif (-a+b+c) < 0:
            break
        elif (a+b+c+1) < 0:
            break
        Total = sqrt( float(factorial(a+b-c)*factorial(a-b+c)*factorial(-a+b+c)) / float(factorial(a+b+c+1)) )
        #print "Total: ", Total
        break
    return Total

### Defining Wigner 6J function
def Wigner6J(j1, j2, j3, j4, j5, j6):
    # Wigner 6J angular momentum coupling
    # {j1 j2 j3} = {J_upper F_upper I}
    # {j4 j5 j6}   {F_lower J_lower 1}
    Delta_Total = DeltaJ(j1, j2, j3)*DeltaJ(j1, j5, j6)*DeltaJ(j4, j2, j6)*DeltaJ(j4, j5, j3)
    Wigner_Total = 0
    z = 0
    while True:
        if (j1+j2+j4+j5-z) < 0:
            break
        elif (j2+j3+j5+j6-z) < 0:
            break
        elif (j3+j1+j6+j4-z) < 0:
            break
        while True:
            if (z-j1-j2-j3) < 0:
                break
            elif (z-j1-j5-j6) < 0:
                break
            elif (z-j4-j2-j6) < 0:
                break
            elif (z-j4-j5-j3) < 0:
                break
            Wigner1 = float(factorial(z-j1-j2-j3))
            Wigner2 = float(factorial(z-j1-j5-j6))
            Wigner3 = float(factorial(z-j4-j2-j6))
            Wigner4 = float(factorial(z-j4-j5-j3))
            Wigner5 = float(factorial(j1+j2+j4+j5-z))
            Wigner6 = float(factorial(j2+j3+j5+j6-z))
            Wigner7 = float(factorial(j3+j1+j6+j4-z))
            Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7
            Wigner_Total = Wigner_Total + (float(-1)**z*factorial(float(z+1)))/Wigner_Denominator
            z = z+1
            if (j1+j2+j4+j5-z) <= 0:
                break
            elif (j2+j3+j5+j6-z) <= 0:
                break
            elif (j3+j1+j6+j4-z) <= 0:
                break
        z = z+1
    Total = float(Delta_Total*Wigner_Total)
    return Total

### Defining the Gaussian function for the HF spectrum
def Gaussian(x, HFS_frequency, FWHM, intensity):
    return float(intensity)*exp(- 0.5*((HFS_frequency-x)/(FWHM/2.355))**2) # Gaussian function

### Defining the Lorentzian function for the HF spectrum
def Lorentzian(x, HFS_frequency, gamma, intensity):
    return intensity*(gamma**2/((x-HFS_frequency)**2 + gamma**2)) # Lorentzian function

### Defining the Voigt function for the HF spectrum
def pseudoVoigt(x, HFS_frequency, FWHM, intensity, eta):
    Gauss = exp(-0.6931*((x-HFS_frequency)/(FWHM/2))**2)
    Lorentz = 1/(1+((x-HFS_frequency)/(FWHM/2))**2)
    Voigt = eta*Lorentz + (1-eta)*Gauss
    return intensity*Voigt # Voigt function

### Defining the Crystalball function
def Crystalball(x_array, x0, N, sigma, alpha, n):
    y_array = []
    for i in range(len(x_array)):
        x = x_array[i]
        t = (x-x0)/sigma
        if (alpha < 0):
            t = -t
        if (t >= -abs(alpha)):
            y = exp(-0.5*t*t)
        else:
            a = ((n/abs(alpha))**n)*exp(-0.5*abs(alpha)*abs(alpha))
            b = n/abs(alpha) - abs(alpha)
            y = a/(b - t)**n
        y_array.append(N*y)
    return array(y_array)

### Defining an exponential pseudoVoigt function
def expoVoigt(x_array, x0, intensity, FWHM, alpha, eta):
    y_array = []
    for i in range(len(x_array)):
        x = x_array[i]
        t = (x-x0)/FWHM
        if (alpha < 0):
            t = -t
        if (t >= -abs(alpha)):
            y = pseudoVoigt(x, x0, FWHM, intensity, eta)*exp(-0.5*t*t)
        else:
            y = pseudoVoigt(x, x0, FWHM, intensity, eta)
        y_array.append(y)
    return array(y_array)

### Defining the HF function which simulates the HF spectrum
# NOTE: pos() is used below but its definition is not among the recovered
# fragments; it presumably maps a quantum-number difference to a
# non-negative value (abs-like).
def HF_function(I, J_lower, J_upper, centroid_frequency, A_lower, A_upper, B_lower, B_upper):
    # Calculates the F values for a J_lower to J_upper transition
    HFS_frequency = []; HF_intensity = []
    F_lower_min = pos(I - J_lower)
    F_lower_max = pos(I + J_lower)
    F_upper_min = pos(I - J_upper)
    F_upper_max = pos(I + J_upper)
    while F_lower_min < (F_lower_max +1) :
        F_upper_min = pos(I - J_upper)
        while F_upper_min < (F_upper_max +1) :
            F_lower = F_lower_min
            F_upper = F_upper_min
            F_delta = F_upper - F_lower
            if (-1 <= F_delta <= 1):
                K_lower = F_lower*(F_lower+1)-I*(I+1)-J_lower*(J_lower+1)
                alpha_lower = K_lower/2
                if I <= 0.5 :
                    beta_lower = 0
                elif J_lower <= 0.5 :
                    beta_lower = 0
                else:
                    beta_lower = (3*K_lower*(K_lower+1)-4*I*(I+1)*J_lower*(J_lower+1))/(8*I*(2*I-1)*J_lower*(2*J_lower-1))
                K_upper = F_upper*(F_upper+1)-I*(I+1)-J_upper*(J_upper+1)
                alpha_upper = K_upper/2
                if I <= 0.5 :
                    beta_upper = 0
                elif J_upper <= 0.5 :
                    beta_upper = 0
                else:
                    beta_upper = (3*K_upper*(K_upper+1)-4*I*(I+1)*J_upper*(J_upper+1))/(8*I*(2*I-1)*J_upper*(2*J_upper-1))
                HFS_frequency.append(centroid_frequency + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower - beta_lower*B_lower)
                HF_intensity.append((2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower, I)**2)
            F_upper_min = F_upper_min +1
        F_lower_min = F_lower_min +1
    return HFS_frequency, HF_intensity

### Defining the intensity for each of the HF peaks
def HF_intensity(I, J_lower, J_upper, F_lower, F_upper):
    # Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower F_upper 1}
    #                                           {J_upper J_lower I}
    Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower, I)**2
    if Intensity == 0:
        print("Intensity = 0")
    return Intensity

### Defining Doppler correction from lab frame to rest frame
def Doppler_correction(freq_range_lab, mass, iscool_voltage):
    alpha = iscool_voltage/(mass*931.494061*10**6)
    freq_range_rest = freq_range_lab*( 1 + alpha - sqrt(2*alpha + alpha*alpha))
    return freq_range_rest

### Defining wavenumber to frequency conversion
def Frequency_conversion(wavenumber, mass, iscool_voltage, harmonic, frequency_correction):
    frequency_rest_frame = array([]); frequency_lab_frame = array([])
    c = 299792458.0 # Speed of light [ms-1] in a vacuum
    e = 2.7182818284 # Maths constant
    alpha = iscool_voltage/(mass*931.494061*10**6) # alpha = eV/mc*c - in units of e/c**2
    # Convert from wavenumber to frequency in lab frame
    frequency_lab_frame = harmonic*wavenumber*c/10**4 # Wavenumber doubled as reading taken at fundamental frequency (calculate in MHz)
    # Convert frequency from lab frame to rest frame
    frequency_rest_frame = frequency_lab_frame*( 1 + alpha - sqrt(2*alpha + alpha*alpha))
    # Convert to relative frequency
    frequency_relative = frequency_rest_frame - frequency_correction
    return frequency_relative

### Defining the HF structure
def HFS(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper, FWHM, Int, Bkgnd, x):
    for i in range(len(HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0])):
        HFS_frequency = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0][i]
        intensity_1 = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[1][i]
        intensity_1 = intensity_1*Int
        Bkgnd = Bkgnd + Gaussian(x, HFS_frequency, FWHM, intensity_1)
    return Bkgnd
"def Frequency_conversion(wavenumber, mass, iscool_voltage, harmonic, frequency_correction): frequency_rest_frame = array([]); frequency_lab_frame = array([]) c",
"z = 0 while True: if (j1+j2+j4+j5-z) < 0: break elif (j2+j3+j5+j6-z) <",
"A_lower, A_upper, B_lower, B_upper)[0])): HFS_frequency = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower,",
"[] F_lower_min = pos(I - J_lower) F_lower_max = pos(I + J_lower) F_upper_min =",
"Wigner 6J angular momentum coupling # {j1 j2 j3} = {J_upper F_upper I}",
"= 0 else: beta_upper = (3*K_upper*(K_upper+1)-4*I*(I+1)*J_upper*(J_upper+1))/(8*I*(2*I-1)*J_upper*(2*J_upper-1)) HFS_frequency.append(centroid_frequency + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower",
"= [] for i in range(len(x_array)): x = x_array[i] t = (x-x0)/sigma if",
"<= 0.5 : beta_lower = 0 elif J_lower <= 0.5 : beta_lower =",
"for the HF spectrum def Lorentzian(x, HFS_frequency, gamma, intensity): return intensity*(gamma**2/((x-HFS_frequency)**2 + gamma**2))",
"coupling # {j1 j2 j3} = {J_upper F_upper I} # {j4 j5 j6}",
"0 else: beta_upper = (3*K_upper*(K_upper+1)-4*I*(I+1)*J_upper*(J_upper+1))/(8*I*(2*I-1)*J_upper*(2*J_upper-1)) HFS_frequency.append(centroid_frequency + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower -",
"Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower F_upper 1} # {J_upper J_lower I} Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower,",
"= z+1 Total = float(Delta_Total*Wigner_Total) return Total ### Defining the Gaussian function for",
"function ### Defining the Lorentzian function for the HF spectrum def Lorentzian(x, HFS_frequency,",
"J_lower I} Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower, I)**2 if Intensity ==",
"0\") return Intensity ### Defining Doppler correction from lab frame to rest frame",
"elif (j3+j1+j6+j4-z) < 0: break while True: if (z-j1-j2-j3) < 0: break elif",
": F_upper_min = pos(I - J_upper) while F_upper_min < (F_upper_max +1) : F_lower",
"beta_lower = 0 elif J_lower <= 0.5 : beta_lower = 0 else: beta_lower",
"# Convert to relative frequency frequency_relative = frequency_rest_frame - frequency_correction return frequency_relative ###",
"J_upper, F_lower, F_upper): # Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower F_upper 1} # {J_upper J_lower",
"alpha = iscool_voltage/(mass*931.494061*10**6) # alpha = eV/mc*c - in units of e/c**2 #",
"\"Total: \", Total break return Total ### Defining Wigner 6J function def Wigner6J(j1,",
"to frequency conversion def Frequency_conversion(wavenumber, mass, iscool_voltage, harmonic, frequency_correction): frequency_rest_frame = array([]); frequency_lab_frame",
"{F_lower J_lower 1} Delta_Total = DeltaJ(j1, j2, j3)*DeltaJ(j1, j5, j6)*DeltaJ(j4, j2, j6)*DeltaJ(j4, j5,",
"-t if (t >= -abs(alpha)): y = pseudoVoigt(x, x0, FWHM, intensity, eta)*exp(-0.5*t*t) else:",
"= (3*K_lower*(K_lower+1)-4*I*(I+1)*J_lower*(J_lower+1))/(8*I*(2*I-1)*J_lower*(2*J_lower-1)) K_upper = F_upper*(F_upper+1)-I*(I+1)-J_upper*(J_upper+1) alpha_upper = K_upper/2 if I <= 0.5 :",
"I)**2 if Intensity == 0: print(\"Intensity = 0\") return Intensity ### Defining Doppler",
"= (2F_lower+1)(2F_upper+1){F_lower F_upper 1} # {J_upper J_lower I} Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1,",
"elif (z-j4-j5-j3) < 0: break Wigner1 = float(factorial(z-j1-j2-j3)) Wigner2 = float(factorial(z-j1-j5-j6)) Wigner3 =",
"else: a = ((n/abs(alpha))**n)*exp(-0.5*abs(alpha)*abs(alpha)) b = n/abs(alpha) - abs(alpha) y = a/(b -",
"alpha, eta): y_array = [] for i in range(len(x_array)): x = x_array[i] t",
"(2F_lower+1)(2F_upper+1){F_lower F_upper 1} # {J_upper J_lower I} Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper,",
"F_upper*(F_upper+1)-I*(I+1)-J_upper*(J_upper+1) alpha_upper = K_upper/2 if I <= 0.5 : beta_upper = 0 elif",
"while True: if (j1+j2+j4+j5-z) < 0: break elif (j2+j3+j5+j6-z) < 0: break elif",
"alpha*alpha)) # Convert to relative frequency frequency_relative = frequency_rest_frame - frequency_correction return frequency_relative",
"= Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7 Wigner_Total = Wigner_Total + (float(-1)**z*factorial(float(z+1)))/Wigner_Denominator z = z+1 if (j1+j2+j4+j5-z) <=",
"[]; HF_intensity = [] F_lower_min = pos(I - J_lower) F_lower_max = pos(I +",
"HFS_frequency = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0][i] intensity_1 = HF_function(I,",
"6J function def Wigner6J(j1, j2, j3, j4, j5, j6): # Wigner 6J angular",
"float(Delta_Total*Wigner_Total) return Total ### Defining the Gaussian function for the HF spectrum def",
"299792458.0 # Speed of light [ms-1] in a vacuum e = 2.7182818284 #",
"doubled as reading taken at fundamental frequency (calculate in MHz) # Convert frequency",
"Voigt function ### Defining the Crystalball function def Crystalball(x_array, x0, N, sigma, alpha,",
"frame frequency_rest_frame = frequency_lab_frame*( 1 + alpha - sqrt(2*alpha + alpha*alpha)) # Convert",
"centroid_frequency, A_lower, A_upper, B_lower, B_upper): # Calculates the F values for a J_lower",
"0 while True: if (a+b-c) < 0: break elif (a-b+c) < 0: break",
"in MHz) # Convert frequency from lab frame to rest frame frequency_rest_frame =",
"reading taken at fundamental frequency (calculate in MHz) # Convert frequency from lab",
"Defining the Gaussian function for the HF spectrum def Gaussian(x, HFS_frequency, FWHM, intensity):",
"i in range(len(x_array)): x = x_array[i] t = (x-x0)/FWHM if (alpha < 0):",
"F_upper_min F_delta = F_upper - F_lower if (-1 <= F_delta <= 1): K_lower",
"array(y_array) ### Defining the HF function which simulates the HF spectrum def HF_function(I,",
"0): t = -t if (t >= -abs(alpha)): y = exp(-0.5*t*t) else: a",
"= x_array[i] t = (x-x0)/sigma if (alpha < 0): t = -t if",
"the Voigt function for the HF spectrum def pseudoVoigt(x, HFS_frequency, FWHM, intensity, eta):",
"from math import sqrt from math import factorial from operator import * from",
"(j1+j2+j4+j5-z) <= 0: break elif (j2+j3+j5+j6-z) <= 0: break elif (j3+j1+j6+j4-z) <= 0:",
"0.5 : beta_lower = 0 elif J_lower <= 0.5 : beta_lower = 0",
"HF_intensity(I, J_lower, J_upper, F_lower, F_upper): # Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower F_upper 1} #",
"frequency_correction return frequency_relative ### Defining the HF structure def HFS(I, J_lower, J_upper, CF,",
"= -t if (t >= -abs(alpha)): y = exp(-0.5*t*t) else: a = ((n/abs(alpha))**n)*exp(-0.5*abs(alpha)*abs(alpha))",
"relative frequency frequency_relative = frequency_rest_frame - frequency_correction return frequency_relative ### Defining the HF",
"else: y = pseudoVoigt(x, x0, FWHM, intensity, eta) y_array.append(y) return array(y_array) ### Defining",
"0: break Wigner1 = float(factorial(z-j1-j2-j3)) Wigner2 = float(factorial(z-j1-j5-j6)) Wigner3 = float(factorial(z-j4-j2-j6)) Wigner4 =",
"def HFS(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper, FWHM, Int, Bkgnd, x):",
"F_lower_max = pos(I + J_lower) F_upper_min = pos(I - J_upper) F_upper_max = pos(I",
"0.5 : beta_upper = 0 elif J_upper <= 0.5 : beta_upper = 0",
"F_upper, 1, J_upper, J_lower, I)**2 if Intensity == 0: print(\"Intensity = 0\") return",
"harmonic, frequency_correction): frequency_rest_frame = array([]); frequency_lab_frame = array([]) c = 299792458.0 # Speed",
"B_lower, B_upper, FWHM, Int, Bkgnd, x): for i in range(len(HF_function(I, J_lower, J_upper, CF,",
"B_upper)[0])): HFS_frequency = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0][i] intensity_1 =",
"intensity*Voigt # Voigt function ### Defining the Crystalball function def Crystalball(x_array, x0, N,",
"while F_lower_min < (F_lower_max +1) : F_upper_min = pos(I - J_upper) while F_upper_min",
"<reponame>karamarielynch/hfs-sim from math import sqrt from math import factorial from operator import *",
"= 0\") return Intensity ### Defining Doppler correction from lab frame to rest",
"def Crystalball(x_array, x0, N, sigma, alpha, n): y_array = [] for i in",
"- J_lower) F_lower_max = pos(I + J_lower) F_upper_min = pos(I - J_upper) F_upper_max",
"x0, N, sigma, alpha, n): y_array = [] for i in range(len(x_array)): x",
"float(factorial(j2+j3+j5+j6-z)) Wigner7 = float(factorial(j3+j1+j6+j4-z)) Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7 Wigner_Total = Wigner_Total + (float(-1)**z*factorial(float(z+1)))/Wigner_Denominator z",
"F_upper_min = pos(I - J_upper) while F_upper_min < (F_upper_max +1) : F_lower =",
"beta_upper = (3*K_upper*(K_upper+1)-4*I*(I+1)*J_upper*(J_upper+1))/(8*I*(2*I-1)*J_upper*(2*J_upper-1)) HFS_frequency.append(centroid_frequency + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower - beta_lower*B_lower) HF_intensity.append((2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower,",
"(F_lower_max +1) : F_upper_min = pos(I - J_upper) while F_upper_min < (F_upper_max +1)",
"< (F_upper_max +1) : F_lower = F_lower_min F_upper = F_upper_min F_delta = F_upper",
"< 0: break elif (z-j1-j5-j6) < 0: break elif (z-j4-j2-j6) < 0: break",
"b = n/abs(alpha) - abs(alpha) y = a/(b - t)**n y_array.append(N*y) return array(y_array)",
"= exp(-0.6931*((x-HFS_frequency)/(FWHM/2))**2) Lorentz = 1/(1+((x-HFS_frequency)/(FWHM/2))**2) Voigt = eta*Lorentz + (1-eta)*Gauss return intensity*Voigt #",
"e = 2.7182818284 # Maths constant alpha = iscool_voltage/(mass*931.494061*10**6) # alpha = eV/mc*c",
"HF_function(I, J_lower, J_upper, centroid_frequency, A_lower, A_upper, B_lower, B_upper): # Calculates the F values",
"### Defining an exponential pseudoVoigt function def expoVoigt(x_array, x0, intensity, FWHM, alpha, eta):",
"<= 0: break elif (j3+j1+j6+j4-z) <= 0: break z = z+1 Total =",
"numpy import * ### Defining the DeltaJ function that will be used in",
">= -abs(alpha)): y = pseudoVoigt(x, x0, FWHM, intensity, eta)*exp(-0.5*t*t) else: y = pseudoVoigt(x,",
"J_lower, J_upper, F_lower, F_upper): # Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower F_upper 1} # {J_upper",
"# Gaussian function ### Defining the Lorentzian function for the HF spectrum def",
"Wigner6J def DeltaJ(a, b, c): Total = 0 while True: if (a+b-c) <",
"from lab frame to rest frame frequency_rest_frame = frequency_lab_frame*( 1 + alpha -",
"break Wigner1 = float(factorial(z-j1-j2-j3)) Wigner2 = float(factorial(z-j1-j5-j6)) Wigner3 = float(factorial(z-j4-j2-j6)) Wigner4 = float(factorial(z-j4-j5-j3))",
"Frequency_conversion(wavenumber, mass, iscool_voltage, harmonic, frequency_correction): frequency_rest_frame = array([]); frequency_lab_frame = array([]) c =",
"FWHM, Int, Bkgnd, x): for i in range(len(HF_function(I, J_lower, J_upper, CF, A_lower, A_upper,",
"the Crystalball function def Crystalball(x_array, x0, N, sigma, alpha, n): y_array = []",
"0: break elif (z-j1-j5-j6) < 0: break elif (z-j4-j2-j6) < 0: break elif",
"pos(I - J_lower) F_lower_max = pos(I + J_lower) F_upper_min = pos(I - J_upper)",
"B_upper, FWHM, Int, Bkgnd, x): for i in range(len(HF_function(I, J_lower, J_upper, CF, A_lower,",
"J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0][i] intensity_1 = HF_function(I, J_lower, J_upper, CF, A_lower,",
"for i in range(len(x_array)): x = x_array[i] t = (x-x0)/sigma if (alpha <",
"= float(factorial(z-j4-j5-j3)) Wigner5 = float(factorial(j1+j2+j4+j5-z)) Wigner6 = float(factorial(j2+j3+j5+j6-z)) Wigner7 = float(factorial(j3+j1+j6+j4-z)) Wigner_Denominator =",
"z = z+1 if (j1+j2+j4+j5-z) <= 0: break elif (j2+j3+j5+j6-z) <= 0: break",
"FWHM, intensity, eta) y_array.append(y) return array(y_array) ### Defining the HF function which simulates",
"frame frequency_lab_frame = harmonic*wavenumber*c/10**4 # Wavenumber doubled as reading taken at fundamental frequency",
"F_upper_max = pos(I + J_upper) while F_lower_min < (F_lower_max +1) : F_upper_min =",
"will be used in Wigner6J def DeltaJ(a, b, c): Total = 0 while",
"z+1 Total = float(Delta_Total*Wigner_Total) return Total ### Defining the Gaussian function for the",
"F_upper_min = F_upper_min +1 F_lower_min = F_lower_min +1 return HFS_frequency, HF_intensity ### Defining",
"j5, j6)*DeltaJ(j4, j2, j6)*DeltaJ(j4, j5, j3) Wigner_Total = 0 z = 0 while",
"gamma, intensity): return intensity*(gamma**2/((x-HFS_frequency)**2 + gamma**2)) # Lorentzian function ### Defining the Voigt",
"pos(I - J_upper) while F_upper_min < (F_upper_max +1) : F_lower = F_lower_min F_upper",
"return freq_range_rest ### Defining wavenumber to frequency conversion def Frequency_conversion(wavenumber, mass, iscool_voltage, harmonic,",
"### Defining the intensity for each of the HF peaks def HF_intensity(I, J_lower,",
"A_upper, B_lower, B_upper)[0])): HFS_frequency = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0][i]",
"Defining the Voigt function for the HF spectrum def pseudoVoigt(x, HFS_frequency, FWHM, intensity,",
"F_lower if (-1 <= F_delta <= 1): K_lower = F_lower*(F_lower+1)-I*(I+1)-J_lower*(J_lower+1) alpha_lower = K_lower/2",
"= K_upper/2 if I <= 0.5 : beta_upper = 0 elif J_upper <=",
"break Total = sqrt( float(factorial(a+b-c)*factorial(a-b+c)*factorial(-a+b+c)) / float(factorial(a+b+c+1)) ) #print \"Total: \", Total break",
"from numpy import * ### Defining the DeltaJ function that will be used",
"Lorentzian(x, HFS_frequency, gamma, intensity): return intensity*(gamma**2/((x-HFS_frequency)**2 + gamma**2)) # Lorentzian function ### Defining",
"x0, FWHM, intensity, eta)*exp(-0.5*t*t) else: y = pseudoVoigt(x, x0, FWHM, intensity, eta) y_array.append(y)",
"= F_lower_min F_upper = F_upper_min F_delta = F_upper - F_lower if (-1 <=",
"alpha*alpha)) return freq_range_rest ### Defining wavenumber to frequency conversion def Frequency_conversion(wavenumber, mass, iscool_voltage,",
"break elif (j3+j1+j6+j4-z) < 0: break while True: if (z-j1-j2-j3) < 0: break",
"< 0: break elif (z-j4-j2-j6) < 0: break elif (z-j4-j5-j3) < 0: break",
"to rest frame def Doppler_correction(freq_range_lab, mass, iscool_voltage): alpha = iscool_voltage/(mass*931.494061*10**6) freq_range_rest = freq_range_lab*(",
"break elif (z-j4-j2-j6) < 0: break elif (z-j4-j5-j3) < 0: break Wigner1 =",
"the HF function which simulates the HF spectrum def HF_function(I, J_lower, J_upper, centroid_frequency,",
"freq_range_rest = freq_range_lab*( 1 + alpha - sqrt(2*alpha + alpha*alpha)) return freq_range_rest ###",
"- abs(alpha) y = a/(b - t)**n y_array.append(N*y) return array(y_array) ### Defining an",
"float(factorial(z-j4-j2-j6)) Wigner4 = float(factorial(z-j4-j5-j3)) Wigner5 = float(factorial(j1+j2+j4+j5-z)) Wigner6 = float(factorial(j2+j3+j5+j6-z)) Wigner7 = float(factorial(j3+j1+j6+j4-z))",
"HFS_frequency, HF_intensity ### Defining the intensity for each of the HF peaks def",
"= iscool_voltage/(mass*931.494061*10**6) # alpha = eV/mc*c - in units of e/c**2 # Convert",
"y_array = [] for i in range(len(x_array)): x = x_array[i] t = (x-x0)/FWHM",
"{J_upper F_upper I} # {j4 j5 j6} {F_lower J_lower 1} Delta_Total = DeltaJ(j1,",
"break elif (z-j4-j5-j3) < 0: break Wigner1 = float(factorial(z-j1-j2-j3)) Wigner2 = float(factorial(z-j1-j5-j6)) Wigner3",
"beta_lower = (3*K_lower*(K_lower+1)-4*I*(I+1)*J_lower*(J_lower+1))/(8*I*(2*I-1)*J_lower*(2*J_lower-1)) K_upper = F_upper*(F_upper+1)-I*(I+1)-J_upper*(J_upper+1) alpha_upper = K_upper/2 if I <= 0.5",
"+1) : F_upper_min = pos(I - J_upper) while F_upper_min < (F_upper_max +1) :",
"= (x-x0)/FWHM if (alpha < 0): t = -t if (t >= -abs(alpha)):",
"I} # {j4 j5 j6} {F_lower J_lower 1} Delta_Total = DeltaJ(j1, j2, j3)*DeltaJ(j1,",
"# alpha = eV/mc*c - in units of e/c**2 # Convert from wavenumber",
"if Intensity == 0: print(\"Intensity = 0\") return Intensity ### Defining Doppler correction",
"math import sqrt from math import factorial from operator import * from numpy",
"= -t if (t >= -abs(alpha)): y = pseudoVoigt(x, x0, FWHM, intensity, eta)*exp(-0.5*t*t)",
"def DeltaJ(a, b, c): Total = 0 while True: if (a+b-c) < 0:",
"import factorial from operator import * from numpy import * ### Defining the",
"= F_upper_min +1 F_lower_min = F_lower_min +1 return HFS_frequency, HF_intensity ### Defining the",
"exp(-0.6931*((x-HFS_frequency)/(FWHM/2))**2) Lorentz = 1/(1+((x-HFS_frequency)/(FWHM/2))**2) Voigt = eta*Lorentz + (1-eta)*Gauss return intensity*Voigt # Voigt",
"(z-j1-j5-j6) < 0: break elif (z-j4-j2-j6) < 0: break elif (z-j4-j5-j3) < 0:",
"I <= 0.5 : beta_lower = 0 elif J_lower <= 0.5 : beta_lower",
"J_upper) while F_lower_min < (F_lower_max +1) : F_upper_min = pos(I - J_upper) while",
"Lorentz = 1/(1+((x-HFS_frequency)/(FWHM/2))**2) Voigt = eta*Lorentz + (1-eta)*Gauss return intensity*Voigt # Voigt function",
"(t >= -abs(alpha)): y = pseudoVoigt(x, x0, FWHM, intensity, eta)*exp(-0.5*t*t) else: y =",
"if (a+b-c) < 0: break elif (a-b+c) < 0: break elif (-a+b+c) <",
"= 0 elif J_lower <= 0.5 : beta_lower = 0 else: beta_lower =",
"lab frame frequency_lab_frame = harmonic*wavenumber*c/10**4 # Wavenumber doubled as reading taken at fundamental",
"y_array.append(y) return array(y_array) ### Defining the HF function which simulates the HF spectrum",
"x = x_array[i] t = (x-x0)/FWHM if (alpha < 0): t = -t",
"F_lower = F_lower_min F_upper = F_upper_min F_delta = F_upper - F_lower if (-1",
"structure def HFS(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper, FWHM, Int, Bkgnd,",
"I <= 0.5 : beta_upper = 0 elif J_upper <= 0.5 : beta_upper",
"< 0: break elif (-a+b+c) < 0: break elif (a+b+c+1) < 0: break",
"t = (x-x0)/sigma if (alpha < 0): t = -t if (t >=",
"B_upper): # Calculates the F values for a J_lower to J_upper transition HFS_frequency",
"Convert from wavenumber to frequency in lab frame frequency_lab_frame = harmonic*wavenumber*c/10**4 # Wavenumber",
"[] for i in range(len(x_array)): x = x_array[i] t = (x-x0)/sigma if (alpha",
"J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[1][i] intensity_1 = intensity_1*Int Bkgnd = Bkgnd",
"frequency_rest_frame = frequency_lab_frame*( 1 + alpha - sqrt(2*alpha + alpha*alpha)) # Convert to",
"B_upper)[0][i] intensity_1 = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[1][i] intensity_1 =",
"= F_lower*(F_lower+1)-I*(I+1)-J_lower*(J_lower+1) alpha_lower = K_lower/2 if I <= 0.5 : beta_lower = 0",
"in lab frame frequency_lab_frame = harmonic*wavenumber*c/10**4 # Wavenumber doubled as reading taken at",
"1 + alpha - sqrt(2*alpha + alpha*alpha)) return freq_range_rest ### Defining wavenumber to",
"{j1 j2 j3} = {J_upper F_upper I} # {j4 j5 j6} {F_lower J_lower",
"HFS_frequency, FWHM, intensity, eta): Gauss = exp(-0.6931*((x-HFS_frequency)/(FWHM/2))**2) Lorentz = 1/(1+((x-HFS_frequency)/(FWHM/2))**2) Voigt = eta*Lorentz",
"= float(factorial(j2+j3+j5+j6-z)) Wigner7 = float(factorial(j3+j1+j6+j4-z)) Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7 Wigner_Total = Wigner_Total + (float(-1)**z*factorial(float(z+1)))/Wigner_Denominator",
"alpha - sqrt(2*alpha + alpha*alpha)) return freq_range_rest ### Defining wavenumber to frequency conversion",
"if (-1 <= F_delta <= 1): K_lower = F_lower*(F_lower+1)-I*(I+1)-J_lower*(J_lower+1) alpha_lower = K_lower/2 if",
"Total = float(Delta_Total*Wigner_Total) return Total ### Defining the Gaussian function for the HF",
"DeltaJ(j1, j2, j3)*DeltaJ(j1, j5, j6)*DeltaJ(j4, j2, j6)*DeltaJ(j4, j5, j3) Wigner_Total = 0 z",
"def HF_intensity(I, J_lower, J_upper, F_lower, F_upper): # Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower F_upper 1}",
"= pseudoVoigt(x, x0, FWHM, intensity, eta) y_array.append(y) return array(y_array) ### Defining the HF",
"Speed of light [ms-1] in a vacuum e = 2.7182818284 # Maths constant",
"Wigner5 = float(factorial(j1+j2+j4+j5-z)) Wigner6 = float(factorial(j2+j3+j5+j6-z)) Wigner7 = float(factorial(j3+j1+j6+j4-z)) Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7 Wigner_Total",
"= 2.7182818284 # Maths constant alpha = iscool_voltage/(mass*931.494061*10**6) # alpha = eV/mc*c -",
"the HF spectrum def Gaussian(x, HFS_frequency, FWHM, intensity): return float(intensity)*exp(- 0.5*((HFS_frequency-x)/(FWHM/2.355))**2) # Gaussian",
"1/(1+((x-HFS_frequency)/(FWHM/2))**2) Voigt = eta*Lorentz + (1-eta)*Gauss return intensity*Voigt # Voigt function ### Defining",
"{J_upper J_lower I} Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower, I)**2 if Intensity",
"A_lower, A_upper, B_lower, B_upper, FWHM, Int, Bkgnd, x): for i in range(len(HF_function(I, J_lower,",
"(z-j4-j5-j3) < 0: break Wigner1 = float(factorial(z-j1-j2-j3)) Wigner2 = float(factorial(z-j1-j5-j6)) Wigner3 = float(factorial(z-j4-j2-j6))",
"= F_upper_min F_delta = F_upper - F_lower if (-1 <= F_delta <= 1):",
"K_lower/2 if I <= 0.5 : beta_lower = 0 elif J_lower <= 0.5",
"A_lower, A_upper, B_lower, B_upper): # Calculates the F values for a J_lower to",
"+ alpha - sqrt(2*alpha + alpha*alpha)) return freq_range_rest ### Defining wavenumber to frequency",
"function for the HF spectrum def Gaussian(x, HFS_frequency, FWHM, intensity): return float(intensity)*exp(- 0.5*((HFS_frequency-x)/(FWHM/2.355))**2)",
"angular momentum coupling # {j1 j2 j3} = {J_upper F_upper I} # {j4",
"= 0 else: beta_lower = (3*K_lower*(K_lower+1)-4*I*(I+1)*J_lower*(J_lower+1))/(8*I*(2*I-1)*J_lower*(2*J_lower-1)) K_upper = F_upper*(F_upper+1)-I*(I+1)-J_upper*(J_upper+1) alpha_upper = K_upper/2 if",
"CF, A_lower, A_upper, B_lower, B_upper)[0][i] intensity_1 = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper,",
"function for the HF spectrum def pseudoVoigt(x, HFS_frequency, FWHM, intensity, eta): Gauss =",
"frequency_relative = frequency_rest_frame - frequency_correction return frequency_relative ### Defining the HF structure def",
"a/(b - t)**n y_array.append(N*y) return array(y_array) ### Defining an exponential pseudoVoigt function def",
"= 0 z = 0 while True: if (j1+j2+j4+j5-z) < 0: break elif",
"Doppler correction from lab frame to rest frame def Doppler_correction(freq_range_lab, mass, iscool_voltage): alpha",
"if (t >= -abs(alpha)): y = exp(-0.5*t*t) else: a = ((n/abs(alpha))**n)*exp(-0.5*abs(alpha)*abs(alpha)) b =",
"6J angular momentum coupling # {j1 j2 j3} = {J_upper F_upper I} #",
"def Doppler_correction(freq_range_lab, mass, iscool_voltage): alpha = iscool_voltage/(mass*931.494061*10**6) freq_range_rest = freq_range_lab*( 1 + alpha",
"constant alpha = iscool_voltage/(mass*931.494061*10**6) # alpha = eV/mc*c - in units of e/c**2",
"j2, j3)*DeltaJ(j1, j5, j6)*DeltaJ(j4, j2, j6)*DeltaJ(j4, j5, j3) Wigner_Total = 0 z =",
"(-a+b+c) < 0: break elif (a+b+c+1) < 0: break Total = sqrt( float(factorial(a+b-c)*factorial(a-b+c)*factorial(-a+b+c))",
"the Gaussian function for the HF spectrum def Gaussian(x, HFS_frequency, FWHM, intensity): return",
"break elif (j2+j3+j5+j6-z) <= 0: break elif (j3+j1+j6+j4-z) <= 0: break z =",
"F_lower_min F_upper = F_upper_min F_delta = F_upper - F_lower if (-1 <= F_delta",
"# {j1 j2 j3} = {J_upper F_upper I} # {j4 j5 j6} {F_lower",
"= [] for i in range(len(x_array)): x = x_array[i] t = (x-x0)/FWHM if",
"< 0: break while True: if (z-j1-j2-j3) < 0: break elif (z-j1-j5-j6) <",
"(3*K_upper*(K_upper+1)-4*I*(I+1)*J_upper*(J_upper+1))/(8*I*(2*I-1)*J_upper*(2*J_upper-1)) HFS_frequency.append(centroid_frequency + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower - beta_lower*B_lower) HF_intensity.append((2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1,",
"return HFS_frequency, HF_intensity ### Defining the intensity for each of the HF peaks",
"F_lower_min +1 return HFS_frequency, HF_intensity ### Defining the intensity for each of the",
"function def Crystalball(x_array, x0, N, sigma, alpha, n): y_array = [] for i",
"True: if (a+b-c) < 0: break elif (a-b+c) < 0: break elif (-a+b+c)",
"Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7 Wigner_Total = Wigner_Total + (float(-1)**z*factorial(float(z+1)))/Wigner_Denominator z = z+1 if (j1+j2+j4+j5-z)",
"+1) : F_lower = F_lower_min F_upper = F_upper_min F_delta = F_upper - F_lower",
"A_upper, B_lower, B_upper, FWHM, Int, Bkgnd, x): for i in range(len(HF_function(I, J_lower, J_upper,",
"F_upper = F_upper_min F_delta = F_upper - F_lower if (-1 <= F_delta <=",
"x0, FWHM, intensity, eta) y_array.append(y) return array(y_array) ### Defining the HF function which",
"for i in range(len(x_array)): x = x_array[i] t = (x-x0)/FWHM if (alpha <",
"HFS_frequency.append(centroid_frequency + alpha_upper*A_upper + beta_upper*B_upper - alpha_lower*A_lower - beta_lower*B_lower) HF_intensity.append((2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper,",
"J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper, FWHM, Int, Bkgnd, x): for i",
"= 0 while True: if (a+b-c) < 0: break elif (a-b+c) < 0:",
"factorial from operator import * from numpy import * ### Defining the DeltaJ",
"a J_lower to J_upper transition HFS_frequency = []; HF_intensity = [] F_lower_min =",
"frequency_lab_frame = array([]) c = 299792458.0 # Speed of light [ms-1] in a",
"HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[1][i] intensity_1 = intensity_1*Int Bkgnd =",
"= F_upper - F_lower if (-1 <= F_delta <= 1): K_lower = F_lower*(F_lower+1)-I*(I+1)-J_lower*(J_lower+1)",
"< 0: break elif (a-b+c) < 0: break elif (-a+b+c) < 0: break",
"Crystalball(x_array, x0, N, sigma, alpha, n): y_array = [] for i in range(len(x_array)):",
"= []; HF_intensity = [] F_lower_min = pos(I - J_lower) F_lower_max = pos(I",
"= 299792458.0 # Speed of light [ms-1] in a vacuum e = 2.7182818284",
"F_upper - F_lower if (-1 <= F_delta <= 1): K_lower = F_lower*(F_lower+1)-I*(I+1)-J_lower*(J_lower+1) alpha_lower",
"lab frame to rest frame def Doppler_correction(freq_range_lab, mass, iscool_voltage): alpha = iscool_voltage/(mass*931.494061*10**6) freq_range_rest",
"return float(intensity)*exp(- 0.5*((HFS_frequency-x)/(FWHM/2.355))**2) # Gaussian function ### Defining the Lorentzian function for the",
"= pos(I + J_upper) while F_lower_min < (F_lower_max +1) : F_upper_min = pos(I",
"= float(Delta_Total*Wigner_Total) return Total ### Defining the Gaussian function for the HF spectrum",
"return array(y_array) ### Defining an exponential pseudoVoigt function def expoVoigt(x_array, x0, intensity, FWHM,",
"J_lower) F_lower_max = pos(I + J_lower) F_upper_min = pos(I - J_upper) F_upper_max =",
"float(factorial(z-j4-j5-j3)) Wigner5 = float(factorial(j1+j2+j4+j5-z)) Wigner6 = float(factorial(j2+j3+j5+j6-z)) Wigner7 = float(factorial(j3+j1+j6+j4-z)) Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7",
"i in range(len(x_array)): x = x_array[i] t = (x-x0)/sigma if (alpha < 0):",
"intensity, eta) y_array.append(y) return array(y_array) ### Defining the HF function which simulates the",
"F_upper_min = pos(I - J_upper) F_upper_max = pos(I + J_upper) while F_lower_min <",
"def HF_function(I, J_lower, J_upper, centroid_frequency, A_lower, A_upper, B_lower, B_upper): # Calculates the F",
">= -abs(alpha)): y = exp(-0.5*t*t) else: a = ((n/abs(alpha))**n)*exp(-0.5*abs(alpha)*abs(alpha)) b = n/abs(alpha) -",
"wavenumber to frequency conversion def Frequency_conversion(wavenumber, mass, iscool_voltage, harmonic, frequency_correction): frequency_rest_frame = array([]);",
"I)**2) F_upper_min = F_upper_min +1 F_lower_min = F_lower_min +1 return HFS_frequency, HF_intensity ###",
"- J_upper) F_upper_max = pos(I + J_upper) while F_lower_min < (F_lower_max +1) :",
"function for the HF spectrum def Lorentzian(x, HFS_frequency, gamma, intensity): return intensity*(gamma**2/((x-HFS_frequency)**2 +",
"FWHM, intensity, eta)*exp(-0.5*t*t) else: y = pseudoVoigt(x, x0, FWHM, intensity, eta) y_array.append(y) return",
"J_upper) while F_upper_min < (F_upper_max +1) : F_lower = F_lower_min F_upper = F_upper_min",
"iscool_voltage/(mass*931.494061*10**6) freq_range_rest = freq_range_lab*( 1 + alpha - sqrt(2*alpha + alpha*alpha)) return freq_range_rest",
": beta_lower = 0 elif J_lower <= 0.5 : beta_lower = 0 else:",
"= iscool_voltage/(mass*931.494061*10**6) freq_range_rest = freq_range_lab*( 1 + alpha - sqrt(2*alpha + alpha*alpha)) return",
"def Lorentzian(x, HFS_frequency, gamma, intensity): return intensity*(gamma**2/((x-HFS_frequency)**2 + gamma**2)) # Lorentzian function ###",
"HF peaks def HF_intensity(I, J_lower, J_upper, F_lower, F_upper): # Intensity ratio = (2F_lower+1)(2F_upper+1){F_lower",
"return array(y_array) ### Defining the HF function which simulates the HF spectrum def",
"F_upper 1} # {J_upper J_lower I} Intensity = (2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower,",
"- sqrt(2*alpha + alpha*alpha)) return freq_range_rest ### Defining wavenumber to frequency conversion def",
"Intensity ### Defining Doppler correction from lab frame to rest frame def Doppler_correction(freq_range_lab,",
"pseudoVoigt(x, HFS_frequency, FWHM, intensity, eta): Gauss = exp(-0.6931*((x-HFS_frequency)/(FWHM/2))**2) Lorentz = 1/(1+((x-HFS_frequency)/(FWHM/2))**2) Voigt =",
"n): y_array = [] for i in range(len(x_array)): x = x_array[i] t =",
"HF function which simulates the HF spectrum def HF_function(I, J_lower, J_upper, centroid_frequency, A_lower,",
"beta_upper = 0 elif J_upper <= 0.5 : beta_upper = 0 else: beta_upper",
"Wavenumber doubled as reading taken at fundamental frequency (calculate in MHz) # Convert",
"elif (a-b+c) < 0: break elif (-a+b+c) < 0: break elif (a+b+c+1) <",
"B_lower, B_upper)[0][i] intensity_1 = HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[1][i] intensity_1",
"HF_intensity.append((2*F_lower+1)*(2*F_upper+1)*Wigner6J(F_lower, F_upper, 1, J_upper, J_lower, I)**2) F_upper_min = F_upper_min +1 F_lower_min = F_lower_min",
"(a+b+c+1) < 0: break Total = sqrt( float(factorial(a+b-c)*factorial(a-b+c)*factorial(-a+b+c)) / float(factorial(a+b+c+1)) ) #print \"Total:",
"(x-x0)/FWHM if (alpha < 0): t = -t if (t >= -abs(alpha)): y",
"gamma**2)) # Lorentzian function ### Defining the Voigt function for the HF spectrum",
"Wigner6 = float(factorial(j2+j3+j5+j6-z)) Wigner7 = float(factorial(j3+j1+j6+j4-z)) Wigner_Denominator = Wigner1*Wigner2*Wigner3*Wigner4*Wigner5*Wigner6*Wigner7 Wigner_Total = Wigner_Total +",
"HF spectrum def pseudoVoigt(x, HFS_frequency, FWHM, intensity, eta): Gauss = exp(-0.6931*((x-HFS_frequency)/(FWHM/2))**2) Lorentz =",
"eta)*exp(-0.5*t*t) else: y = pseudoVoigt(x, x0, FWHM, intensity, eta) y_array.append(y) return array(y_array) ###",
"rest frame frequency_rest_frame = frequency_lab_frame*( 1 + alpha - sqrt(2*alpha + alpha*alpha)) #",
"in units of e/c**2 # Convert from wavenumber to frequency in lab frame",
"< (F_lower_max +1) : F_upper_min = pos(I - J_upper) while F_upper_min < (F_upper_max",
"0 while True: if (j1+j2+j4+j5-z) < 0: break elif (j2+j3+j5+j6-z) < 0: break",
"= HF_function(I, J_lower, J_upper, CF, A_lower, A_upper, B_lower, B_upper)[0][i] intensity_1 = HF_function(I, J_lower,",
"frequency (calculate in MHz) # Convert frequency from lab frame to rest frame",
"exp(-0.5*t*t) else: a = ((n/abs(alpha))**n)*exp(-0.5*abs(alpha)*abs(alpha)) b = n/abs(alpha) - abs(alpha) y = a/(b",
"range(len(x_array)): x = x_array[i] t = (x-x0)/FWHM if (alpha < 0): t ="
] |
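The peak positions built up in HF_function follow the first-order Casimir formula: shift = A*K/2 + B*(3K(K+1) - 4I(I+1)J(J+1)) / (8I(2I-1)J(2J-1)), with K = F(F+1) - I(I+1) - J(J+1). A minimal self-contained sketch of that single-level shift; the quantum numbers and coupling constants below are illustrative placeholders, not values from the source:

def hyperfine_shift(I, J, F, A, B):
    # K = F(F+1) - I(I+1) - J(J+1); the magnetic-dipole term is A*K/2.
    K = F * (F + 1) - I * (I + 1) - J * (J + 1)
    shift = A * K / 2
    # The electric-quadrupole term vanishes for I or J <= 1/2,
    # mirroring the beta_lower/beta_upper checks in HF_function.
    if I > 0.5 and J > 0.5:
        shift += B * (3 * K * (K + 1) - 4 * I * (I + 1) * J * (J + 1)) / (
            8 * I * (2 * I - 1) * J * (2 * J - 1)
        )
    return shift

# Illustrative values only: I = 3/2, J = 1/2, A = 100 MHz, B = 0.
for F in (1.0, 2.0):
    print(F, hyperfine_shift(1.5, 0.5, F, 100.0, 0.0))

Each HFS peak in the source is then placed at the centroid plus the upper-level shift minus the lower-level shift.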
IFS = """
"""

def STYLES():
    with open('style.qss') as main_styles:
        with open('customizable_styles.qss') as custom_styles:
            all_styles = main_styles.read() + IFS + custom_styles.read()
    return all_styles
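STYLES() simply concatenates the base stylesheet with the user-customizable one. A hedged usage sketch, assuming a PyQt5 application (the source may target a different Qt binding) and that both .qss files sit next to the script:

import sys
from PyQt5.QtWidgets import QApplication, QWidget  # assumption: PyQt5 binding

app = QApplication(sys.argv)
app.setStyleSheet(STYLES())  # apply the combined base + custom stylesheet app-wide
window = QWidget()
window.show()
sys.exit(app.exec_())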
<reponame>filippovitale/interactive-coding-challenges
from nose.tools import assert_equal


class TestBstValidate(object):

    def test_bst_validate(self):
        node = Node(5)
        insert(node, 8)
        insert(node, 5)
        insert(node, 6)
        insert(node, 4)
        insert(node, 7)
        assert_equal(validate_bst(node), True)
        root = Node(5)
        left = Node(5)
        right = Node(8)
        invalid = Node(20)
        root.left = left
        root.right = right
        root.left.right = invalid
        assert_equal(validate_bst(root), False)
        print('Success: test_bst_validate')


def main():
    test = TestBstValidate()
    test.test_bst_validate()


if __name__ == '__main__':
    main()
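The test imports only assert_equal, so Node, insert, and validate_bst come from the surrounding challenge notebook. A minimal sketch of implementations the test would pass with; this is an assumed solution, not the repository's:

class Node(object):
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def insert(node, data):
    # Standard BST insert: smaller-or-equal keys go left, larger go right.
    if data <= node.data:
        if node.left is None:
            node.left = Node(data)
        else:
            insert(node.left, data)
    else:
        if node.right is None:
            node.right = Node(data)
        else:
            insert(node.right, data)


def validate_bst(node, minimum=float('-inf'), maximum=float('inf')):
    # Every node must stay inside the window implied by its ancestors.
    if node is None:
        return True
    if not (minimum <= node.data <= maximum):
        return False
    return (validate_bst(node.left, minimum, node.data) and
            validate_bst(node.right, node.data, maximum))

Passing the bounds down the recursion is what catches the invalid Node(20): it sits in the left subtree of the root, so it must not exceed 5.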
[
"sys import tempfile from pathlib import Path import nox from nox import Session,",
"\"requirements.txt\" else: requirements_path = requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\", \"-o\", requirements_path, external=True, )",
"-> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session) -> None: \"\"\"Build and",
"build_dir = Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) ->",
"session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session:",
"build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile() as requirements:",
"] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir)",
"session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session) -> None: \"\"\"Run linting.\"\"\" pass",
"*args) def _install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile() as requirements: if sys.platform ==",
"@nox.session def lint(session: Session) -> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session)",
"with live reloading on file changes.\"\"\" args = session.posargs or [ \"--open-browser\", \"docs\",",
"documentation with live reloading on file changes.\"\"\" args = session.posargs or [ \"--open-browser\",",
"if sys.platform == \"win32\": requirements_path = \"requirements.txt\" else: requirements_path = requirements.name session.run( \"poetry\",",
"-> None: with tempfile.NamedTemporaryFile() as requirements: if sys.platform == \"win32\": requirements_path = \"requirements.txt\"",
"session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile() as requirements: if sys.platform",
"session.run(\"pytest\") @nox.session def lint(session: Session) -> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session:",
"def _install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile() as requirements: if sys.platform == \"win32\":",
"_install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile() as requirements: if sys.platform == \"win32\": requirements_path",
"\"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session) -> None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session)",
"reloading on file changes.\"\"\" args = session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\", ]",
"Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) -> None: with",
"\"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if",
"file changes.\"\"\" args = session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\",",
"\"\"\"Nox sessions.\"\"\" import shutil import sys import tempfile from pathlib import Path import",
"as requirements: if sys.platform == \"win32\": requirements_path = \"requirements.txt\" else: requirements_path = requirements.name",
"\"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session) -> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def",
"import Path import nox from nox import Session, session python_versions = [\"3.10\", \"3.9\",",
"sessions.\"\"\" import shutil import sys import tempfile from pathlib import Path import nox",
"def docs(session: Session) -> None: \"\"\"Build and serve the documentation with live reloading",
"_install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\",",
"= Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) -> None:",
"\"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\")",
"session python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session) -> None:",
"tempfile from pathlib import Path import nox from nox import Session, session python_versions",
"None: with tempfile.NamedTemporaryFile() as requirements: if sys.platform == \"win32\": requirements_path = \"requirements.txt\" else:",
"sys.platform == \"win32\": requirements_path = \"requirements.txt\" else: requirements_path = requirements.name session.run( \"poetry\", \"export\",",
"with tempfile.NamedTemporaryFile() as requirements: if sys.platform == \"win32\": requirements_path = \"requirements.txt\" else: requirements_path",
"\"win32\": requirements_path = \"requirements.txt\" else: requirements_path = requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\", \"-o\",",
"session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args)",
"on file changes.\"\"\" args = session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session)",
"import shutil import sys import tempfile from pathlib import Path import nox from",
"-> None: \"\"\"Build and serve the documentation with live reloading on file changes.\"\"\"",
"\"3.7\"] @nox.session(python=python_versions) def tests(session: Session) -> None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\",",
"[ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\",",
"\"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session) -> None: \"\"\"Run the test suite.\"\"\"",
"Session) -> None: with tempfile.NamedTemporaryFile() as requirements: if sys.platform == \"win32\": requirements_path =",
"pass @session(python=python_versions[0]) def docs(session: Session) -> None: \"\"\"Build and serve the documentation with",
"import tempfile from pathlib import Path import nox from nox import Session, session",
"\"\"\"Build and serve the documentation with live reloading on file changes.\"\"\" args =",
"def lint(session: Session) -> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session) ->",
"from pathlib import Path import nox from nox import Session, session python_versions =",
"None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session) -> None: \"\"\"Build and serve",
"Session) -> None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session",
"shutil import sys import tempfile from pathlib import Path import nox from nox",
"_install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session) -> None: \"\"\"Run linting.\"\"\"",
"import nox from nox import Session, session python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"]",
"@nox.session(python=python_versions) def tests(session: Session) -> None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\",",
"tests(session: Session) -> None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\")",
"or [ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir =",
"pathlib import Path import nox from nox import Session, session python_versions = [\"3.10\",",
"nox from nox import Session, session python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions)",
"tempfile.NamedTemporaryFile() as requirements: if sys.platform == \"win32\": requirements_path = \"requirements.txt\" else: requirements_path =",
"linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session) -> None: \"\"\"Build and serve the documentation",
"else: requirements_path = requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\", \"-o\", requirements_path, external=True, ) session.install(\"-r\",",
"docs(session: Session) -> None: \"\"\"Build and serve the documentation with live reloading on",
"None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session:",
"import sys import tempfile from pathlib import Path import nox from nox import",
"None: \"\"\"Build and serve the documentation with live reloading on file changes.\"\"\" args",
"requirements_path = \"requirements.txt\" else: requirements_path = requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\", \"-o\", requirements_path,",
"if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile() as",
"\"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session)",
"changes.\"\"\" args = session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\")",
"Session) -> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session) -> None: \"\"\"Build",
"-> None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def",
"args = session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\",",
"@session(python=python_versions[0]) def docs(session: Session) -> None: \"\"\"Build and serve the documentation with live",
"== \"win32\": requirements_path = \"requirements.txt\" else: requirements_path = requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\",",
"= requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\", \"-o\", requirements_path, external=True, ) session.install(\"-r\", requirements_path) session.install(\".\")",
"\"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session) -> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0])",
"and serve the documentation with live reloading on file changes.\"\"\" args = session.posargs",
"\"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if build_dir.exists():",
"= [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session) -> None: \"\"\"Run the",
"import Session, session python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session)",
"requirements: if sys.platform == \"win32\": requirements_path = \"requirements.txt\" else: requirements_path = requirements.name session.run(",
"def tests(session: Session) -> None: \"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\")",
"live reloading on file changes.\"\"\" args = session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\",",
"= \"requirements.txt\" else: requirements_path = requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\", \"-o\", requirements_path, external=True,",
"serve the documentation with live reloading on file changes.\"\"\" args = session.posargs or",
"Path import nox from nox import Session, session python_versions = [\"3.10\", \"3.9\", \"3.8\",",
"the documentation with live reloading on file changes.\"\"\" args = session.posargs or [",
"from nox import Session, session python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def",
"the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session) ->",
"= session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\")",
"test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session) -> None:",
"suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session) -> None: \"\"\"Run",
"Session, session python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session) ->",
"\"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir = Path(\"docs\", \"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def",
"python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session) -> None: \"\"\"Run",
"session.posargs or [ \"--open-browser\", \"docs\", \"docs/_build\", ] _install_via_pip(session) session.install(\"-r\", \"docs/requirements.txt\") session.install(\"sphinx\", \"sphinx-autobuild\") build_dir",
"\"_build\") if build_dir.exists(): shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile()",
"Session) -> None: \"\"\"Build and serve the documentation with live reloading on file",
"shutil.rmtree(build_dir) session.run(\"sphinx-autobuild\", *args) def _install_via_pip(session: Session) -> None: with tempfile.NamedTemporaryFile() as requirements: if",
"\"\"\"Run the test suite.\"\"\" _install_via_pip(session) session.install(\"pytest\", \"pytest-cov\", \"xdoctest\") session.run(\"pytest\") @nox.session def lint(session: Session)",
"requirements_path = requirements.name session.run( \"poetry\", \"export\", \"--without-hashes\", \"-o\", requirements_path, external=True, ) session.install(\"-r\", requirements_path)",
"<reponame>tdmorello/imagecatalog \"\"\"Nox sessions.\"\"\" import shutil import sys import tempfile from pathlib import Path",
"nox import Session, session python_versions = [\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session:",
"[\"3.10\", \"3.9\", \"3.8\", \"3.7\"] @nox.session(python=python_versions) def tests(session: Session) -> None: \"\"\"Run the test",
"lint(session: Session) -> None: \"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session) -> None:",
"\"\"\"Run linting.\"\"\" pass @session(python=python_versions[0]) def docs(session: Session) -> None: \"\"\"Build and serve the"
"\"\"\"Top-level package for contactsheet.\"\"\" __author__ = \"\"\"<NAME>\"\"\" __email__ = '<EMAIL>' __version__ = '0.1.0'",
"-*- \"\"\"Top-level package for contactsheet.\"\"\" __author__ = \"\"\"<NAME>\"\"\" __email__ = '<EMAIL>' __version__ =",
"utf-8 -*- \"\"\"Top-level package for contactsheet.\"\"\" __author__ = \"\"\"<NAME>\"\"\" __email__ = '<EMAIL>' __version__",
"coding: utf-8 -*- \"\"\"Top-level package for contactsheet.\"\"\" __author__ = \"\"\"<NAME>\"\"\" __email__ = '<EMAIL>'",
"-*- coding: utf-8 -*- \"\"\"Top-level package for contactsheet.\"\"\" __author__ = \"\"\"<NAME>\"\"\" __email__ =",
"# -*- coding: utf-8 -*- \"\"\"Top-level package for contactsheet.\"\"\" __author__ = \"\"\"<NAME>\"\"\" __email__"
"reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state): x = state[\"location\"][\"x\"] y =",
"state[\"seen\"]: return state, None items = self.items.values() item_list = list(filter(lambda x: x[\"char\"] ==",
"grid): rows = [] for row in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return",
"\"story.yaml\") class Scene(): def __init__(self, path, state): scene_file = state[\"current_scene\"] self.load(path, scene_file) def",
"= None def load(self): f = open(self.filename(), 'r') text = f.read() f.close() data",
"or times > 100: return False if len(direction) > 1: return False if",
"x = start_x y = start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in range",
"= None self.scene = None self.synopsis = None def load(self): f = open(self.filename(),",
"self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path, scene_file): return",
"for row in rows: disassembled.append(list(row)) return disassembled def reassemble_map(self, grid): rows = []",
"tiles[x] != \"#\": return False return True def view(self, location): x = location[\"x\"]",
"= self.disassemble_map() grid[y][x] = \"@\" if self.level in state[\"seen\"]: for item in self.items.values():",
"pview[\"narration\"] return narration def look(self, state): seen = \"\\n\".join(self.items.keys()) if self.level not in",
"\"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def describe(self, state,",
"return False if tiles[x] != \"#\": return False return True def view(self, location):",
"view(self, location): x = location[\"x\"] y = location[\"y\"] narration = None for pview",
"f.close() data = yaml.load(text) if data == None: return False else: self.__dict__ =",
"+= 1 elif direction == \"s\": y -= 1 elif direction == \"e\":",
"= location[\"y\"] narration = None for pview in self.views.values(): if pview[\"x\"] == x",
"y = location[\"y\"] narration = None for pview in self.views.values(): if pview[\"x\"] ==",
"= self.items.values() item_list = list(filter(lambda x: x[\"char\"] == char, items)) if len(item_list) ==",
"times if not type(times) is int: return False if not type(direction) is str:",
"not in state[\"seen\"]: return state, None items = self.items.values() item_list = list(filter(lambda x:",
"grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state): x = state[\"location\"][\"x\"]",
"= [] for row in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def",
"disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row in rows: disassembled.append(list(row)) return",
"def describe(self, state, char): if self.level not in state[\"seen\"]: return state, None items",
"not in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def describe(self, state, char): if self.level",
"False # find new postion x = start_x y = start_y rows =",
"__init__(self, path, state): scene_file = state[\"current_scene\"] self.load(path, scene_file) def load(self, path, scene_file): f",
"None self.synopsis = None def load(self): f = open(self.filename(), 'r') text = f.read()",
"if data == None: return False else: self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\")",
"== None: return False else: self.__dict__ = data return True def filename(self): return",
"state): scene_file = state[\"current_scene\"] self.load(path, scene_file) def load(self, path, scene_file): f = open(self.filename(path,",
"direction == \"e\": x += 1 elif direction == \"w\": x -= 1",
"elif direction == \"w\": x -= 1 if len(rows) <= y: return False",
"self.scene = None self.synopsis = None def load(self): f = open(self.filename(), 'r') text",
"state[\"seen\"].append(self.level) return state, seen def describe(self, state, char): if self.level not in state[\"seen\"]:",
"grid = self.disassemble_map() grid[y][x] = \"@\" if self.level in state[\"seen\"]: for item in",
"return reassembled def valid_move(self, location, direction, times): start_x = location[\"x\"] start_y = location[\"y\"]",
"+= 1 elif direction == \"w\": x -= 1 if len(rows) <= y:",
"in rows: disassembled.append(list(row)) return disassembled def reassemble_map(self, grid): rows = [] for row",
"if times < 1 or times > 100: return False if len(direction) >",
"seen def describe(self, state, char): if self.level not in state[\"seen\"]: return state, None",
"\"n\": y += 1 elif direction == \"s\": y -= 1 elif direction",
"new postion x = start_x y = start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i",
"< 1 or times > 100: return False if len(direction) > 1: return",
"return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row",
"def load(self): f = open(self.filename(), 'r') text = f.read() f.close() data = yaml.load(text)",
"== \"n\": y += 1 elif direction == \"s\": y -= 1 elif",
"data return True def filename(self): return join(self.path, \"story.yaml\") class Scene(): def __init__(self, path,",
"None self.scene = None self.synopsis = None def load(self): f = open(self.filename(), 'r')",
"<= x: return False if tiles[x] != \"#\": return False return True def",
"False if tiles[x] != \"#\": return False return True def view(self, location): x",
"def load(self, path, scene_file): f = open(self.filename(path, scene_file), 'r') text = f.read() f.close()",
"def view(self, location): x = location[\"x\"] y = location[\"y\"] narration = None for",
"[] for row in rows: disassembled.append(list(row)) return disassembled def reassemble_map(self, grid): rows =",
"\"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row in rows:",
"data == None: return False else: self.__dict__ = data return True def filename(self):",
"# find new postion x = start_x y = start_y rows = list(reversed(self.scene_map.split(\"\\n\")))",
"self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def",
"scene_file) def load(self, path, scene_file): f = open(self.filename(path, scene_file), 'r') text = f.read()",
"or y < 0: return False tiles = list(rows[y]) if len(tiles) <= x:",
"1 or times > 100: return False if len(direction) > 1: return False",
"range (0, times): if direction == \"n\": y += 1 elif direction ==",
"in self.views.values(): if pview[\"x\"] == x and pview[\"y\"] == y: narration = pview[\"narration\"]",
"narration = None for pview in self.views.values(): if pview[\"x\"] == x and pview[\"y\"]",
"path): self.path = path self.author = None self.title = None self.scene = None",
"open(self.filename(path, scene_file), 'r') text = f.read() f.close() data = yaml.load(text) if data ==",
"= yaml.load(text) if data == None: return False else: self.__dict__ = data return",
"1 elif direction == \"s\": y -= 1 elif direction == \"e\": x",
"x and pview[\"y\"] == y: narration = pview[\"narration\"] return narration def look(self, state):",
"0 or y < 0: return False tiles = list(rows[y]) if len(tiles) <=",
"self.level in state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid)",
"not type(times) is int: return False if not type(direction) is str: return False",
"location[\"y\"] # validate direction and times if not type(times) is int: return False",
"in range (0, times): if direction == \"n\": y += 1 elif direction",
"1 elif direction == \"w\": x -= 1 if len(rows) <= y: return",
"item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled def valid_move(self, location, direction, times): start_x =",
"not direction in \"nsew\": return False # find new postion x = start_x",
"== \"e\": x += 1 elif direction == \"w\": x -= 1 if",
"list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row in rows: disassembled.append(list(row)) return disassembled def reassemble_map(self,",
"scene_file), 'r') text = f.read() f.close() data = yaml.load(text) if data == None:",
"times < 1 or times > 100: return False if len(direction) > 1:",
"if len(direction) > 1: return False if not direction in \"nsew\": return False",
"data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file))",
"open(self.filename(), 'r') text = f.read() f.close() data = yaml.load(text) if data == None:",
"scene_file): f = open(self.filename(path, scene_file), 'r') text = f.read() f.close() data = yaml.load(text)",
"self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def describe(self, state, char): if",
"\"@\" if self.level in state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled",
"y += 1 elif direction == \"s\": y -= 1 elif direction ==",
"# validate direction and times if not type(times) is int: return False if",
"return False if x < 0 or y < 0: return False tiles",
"reassembled def valid_move(self, location, direction, times): start_x = location[\"x\"] start_y = location[\"y\"] #",
"Story: def __init__(self, path): self.path = path self.author = None self.title = None",
"is str: return False if times < 1 or times > 100: return",
"times > 100: return False if len(direction) > 1: return False if not",
"None: return False else: self.__dict__ = data return True def filename(self): return join(self.path,",
"filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled =",
"pview[\"y\"] == y: narration = pview[\"narration\"] return narration def look(self, state): seen =",
"i in range (0, times): if direction == \"n\": y += 1 elif",
"import yaml class Story: def __init__(self, path): self.path = path self.author = None",
"in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled def valid_move(self, location,",
"y < 0: return False tiles = list(rows[y]) if len(tiles) <= x: return",
"None for pview in self.views.values(): if pview[\"x\"] == x and pview[\"y\"] == y:",
"x: x[\"char\"] == char, items)) if len(item_list) == 0: return state, None return",
"= item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled def valid_move(self, location, direction, times): start_x",
"location): x = location[\"x\"] y = location[\"y\"] narration = None for pview in",
"> 1: return False if not direction in \"nsew\": return False # find",
"def reassemble_map(self, grid): rows = [] for row in grid: rows.append(''.join(row)) reassembled =",
"return False if not direction in \"nsew\": return False # find new postion",
"state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] = \"@\" if self.level in",
"self.items.values() item_list = list(filter(lambda x: x[\"char\"] == char, items)) if len(item_list) == 0:",
"direction, times): start_x = location[\"x\"] start_y = location[\"y\"] # validate direction and times",
"x = location[\"x\"] y = location[\"y\"] narration = None for pview in self.views.values():",
"times): if direction == \"n\": y += 1 elif direction == \"s\": y",
"state, seen def describe(self, state, char): if self.level not in state[\"seen\"]: return state,",
"[] for row in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self,",
"return False else: self.__dict__ = data return True def filename(self): return join(self.path, \"story.yaml\")",
"if pview[\"x\"] == x and pview[\"y\"] == y: narration = pview[\"narration\"] return narration",
"pview in self.views.values(): if pview[\"x\"] == x and pview[\"y\"] == y: narration =",
"x[\"char\"] == char, items)) if len(item_list) == 0: return state, None return state,",
"scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for",
"disassembled def reassemble_map(self, grid): rows = [] for row in grid: rows.append(''.join(row)) reassembled",
"None def load(self): f = open(self.filename(), 'r') text = f.read() f.close() data =",
"self.__dict__ = data return True def filename(self): return join(self.path, \"story.yaml\") class Scene(): def",
"else: self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path, scene_file):",
"self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled def valid_move(self, location, direction,",
"== y: narration = pview[\"narration\"] return narration def look(self, state): seen = \"\\n\".join(self.items.keys())",
"= start_x y = start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0,",
"items = self.items.values() item_list = list(filter(lambda x: x[\"char\"] == char, items)) if len(item_list)",
"y = start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0, times): if",
"os.path import join import yaml class Story: def __init__(self, path): self.path = path",
"Scene(): def __init__(self, path, state): scene_file = state[\"current_scene\"] self.load(path, scene_file) def load(self, path,",
"type(times) is int: return False if not type(direction) is str: return False if",
"== char, items)) if len(item_list) == 0: return state, None return state, item_list[0][\"description\"]",
"def valid_move(self, location, direction, times): start_x = location[\"x\"] start_y = location[\"y\"] # validate",
"find new postion x = start_x y = start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for",
"state, None items = self.items.values() item_list = list(filter(lambda x: x[\"char\"] == char, items))",
"item_list = list(filter(lambda x: x[\"char\"] == char, items)) if len(item_list) == 0: return",
"tiles = list(rows[y]) if len(tiles) <= x: return False if tiles[x] != \"#\":",
"= None self.title = None self.scene = None self.synopsis = None def load(self):",
"y = state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] = \"@\" if self.level in state[\"seen\"]:",
"= open(self.filename(), 'r') text = f.read() f.close() data = yaml.load(text) if data ==",
"for i in range (0, times): if direction == \"n\": y += 1",
"grid[y][x] = \"@\" if self.level in state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] =",
"in \"nsew\": return False # find new postion x = start_x y =",
"in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state): x =",
"path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = []",
"if not type(direction) is str: return False if times < 1 or times",
"if len(rows) <= y: return False if x < 0 or y <",
"(0, times): if direction == \"n\": y += 1 elif direction == \"s\":",
"= f.read() f.close() data = yaml.load(text) if data == None: return False else:",
"location[\"y\"] narration = None for pview in self.views.values(): if pview[\"x\"] == x and",
"state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def describe(self, state, char): if self.level not in",
"direction == \"n\": y += 1 elif direction == \"s\": y -= 1",
"state): x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] = \"@\"",
"direction and times if not type(times) is int: return False if not type(direction)",
"valid_move(self, location, direction, times): start_x = location[\"x\"] start_y = location[\"y\"] # validate direction",
"return False tiles = list(rows[y]) if len(tiles) <= x: return False if tiles[x]",
"else: self.__dict__ = data return True def filename(self): return join(self.path, \"story.yaml\") class Scene():",
"-= 1 if len(rows) <= y: return False if x < 0 or",
"str: return False if times < 1 or times > 100: return False",
"narration = pview[\"narration\"] return narration def look(self, state): seen = \"\\n\".join(self.items.keys()) if self.level",
"False if not type(direction) is str: return False if times < 1 or",
"'r') text = f.read() f.close() data = yaml.load(text) if data == None: return",
"rows: disassembled.append(list(row)) return disassembled def reassemble_map(self, grid): rows = [] for row in",
"\"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state): x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid",
"if len(tiles) <= x: return False if tiles[x] != \"#\": return False return",
"= list(filter(lambda x: x[\"char\"] == char, items)) if len(item_list) == 0: return state,",
"< 0: return False tiles = list(rows[y]) if len(tiles) <= x: return False",
"True def filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\")))",
"= list(rows[y]) if len(tiles) <= x: return False if tiles[x] != \"#\": return",
"item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled def valid_move(self,",
"data == None: return False else: self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return",
"None self.title = None self.scene = None self.synopsis = None def load(self): f",
"len(rows) <= y: return False if x < 0 or y < 0:",
"= None for pview in self.views.values(): if pview[\"x\"] == x and pview[\"y\"] ==",
"1 if len(rows) <= y: return False if x < 0 or y",
"grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled def valid_move(self, location, direction, times):",
"True def view(self, location): x = location[\"x\"] y = location[\"y\"] narration = None",
"== None: return False else: self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True",
"start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0, times): if direction ==",
"state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled",
"False else: self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path,",
"= location[\"x\"] start_y = location[\"y\"] # validate direction and times if not type(times)",
"== x and pview[\"y\"] == y: narration = pview[\"narration\"] return narration def look(self,",
"list(filter(lambda x: x[\"char\"] == char, items)) if len(item_list) == 0: return state, None",
"= path self.author = None self.title = None self.scene = None self.synopsis =",
"data = yaml.load(text) if data == None: return False else: self.__dict__ = data",
"from os.path import join import yaml class Story: def __init__(self, path): self.path =",
"rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state): x = state[\"location\"][\"x\"] y",
"False if not direction in \"nsew\": return False # find new postion x",
"= data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path, scene_file): return join(path,",
"= self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self):",
"1: return False if not direction in \"nsew\": return False # find new",
"f.read() f.close() data = yaml.load(text) if data == None: return False else: self.__dict__",
"> 100: return False if len(direction) > 1: return False if not direction",
"load(self, path, scene_file): f = open(self.filename(path, scene_file), 'r') text = f.read() f.close() data",
"def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row in rows: disassembled.append(list(row))",
"__init__(self, path): self.path = path self.author = None self.title = None self.scene =",
"!= \"#\": return False return True def view(self, location): x = location[\"x\"] y",
"for row in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state):",
"False tiles = list(rows[y]) if len(tiles) <= x: return False if tiles[x] !=",
"in state[\"seen\"]: return state, None items = self.items.values() item_list = list(filter(lambda x: x[\"char\"]",
"if self.level not in state[\"seen\"]: return state, None items = self.items.values() item_list =",
"direction == \"w\": x -= 1 if len(rows) <= y: return False if",
"return disassembled def reassemble_map(self, grid): rows = [] for row in grid: rows.append(''.join(row))",
"= yaml.load(text) if data == None: return False else: self.__dict__ = data self.__dict__[\"scene_map\"]",
"state[\"current_scene\"] self.load(path, scene_file) def load(self, path, scene_file): f = open(self.filename(path, scene_file), 'r') text",
"rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0, times): if direction == \"n\":",
"direction == \"s\": y -= 1 elif direction == \"e\": x += 1",
"reassembled = self.reassemble_map(grid) return reassembled def valid_move(self, location, direction, times): start_x = location[\"x\"]",
"return state, seen def describe(self, state, char): if self.level not in state[\"seen\"]: return",
"disassembled.append(list(row)) return disassembled def reassemble_map(self, grid): rows = [] for row in grid:",
"\"s\": y -= 1 elif direction == \"e\": x += 1 elif direction",
"return state, None items = self.items.values() item_list = list(filter(lambda x: x[\"char\"] == char,",
"start_x = location[\"x\"] start_y = location[\"y\"] # validate direction and times if not",
"join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row in",
"< 0 or y < 0: return False tiles = list(rows[y]) if len(tiles)",
"in state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid) return",
"start_x y = start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0, times):",
"-= 1 elif direction == \"e\": x += 1 elif direction == \"w\":",
"return False if len(direction) > 1: return False if not direction in \"nsew\":",
"state): seen = \"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen",
"path self.author = None self.title = None self.scene = None self.synopsis = None",
"narration def look(self, state): seen = \"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level)",
"yaml.load(text) if data == None: return False else: self.__dict__ = data self.__dict__[\"scene_map\"] =",
"join(self.path, \"story.yaml\") class Scene(): def __init__(self, path, state): scene_file = state[\"current_scene\"] self.load(path, scene_file)",
"rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row in rows: disassembled.append(list(row)) return disassembled",
"= location[\"x\"] y = location[\"y\"] narration = None for pview in self.views.values(): if",
"= state[\"current_scene\"] self.load(path, scene_file) def load(self, path, scene_file): f = open(self.filename(path, scene_file), 'r')",
"join import yaml class Story: def __init__(self, path): self.path = path self.author =",
"return False else: self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self,",
"in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def describe(self, state, char): if self.level not",
"return False if times < 1 or times > 100: return False if",
"== \"s\": y -= 1 elif direction == \"e\": x += 1 elif",
"f = open(self.filename(path, scene_file), 'r') text = f.read() f.close() data = yaml.load(text) if",
"if self.level in state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled =",
"False else: self.__dict__ = data return True def filename(self): return join(self.path, \"story.yaml\") class",
"self.load(path, scene_file) def load(self, path, scene_file): f = open(self.filename(path, scene_file), 'r') text =",
"list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0, times): if direction == \"n\": y +=",
"if not type(times) is int: return False if not type(direction) is str: return",
"for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"] reassembled = self.reassemble_map(grid) return reassembled def",
"= None self.synopsis = None def load(self): f = open(self.filename(), 'r') text =",
"\"e\": x += 1 elif direction == \"w\": x -= 1 if len(rows)",
"def __init__(self, path): self.path = path self.author = None self.title = None self.scene",
"validate direction and times if not type(times) is int: return False if not",
"if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def describe(self, state, char):",
"if data == None: return False else: self.__dict__ = data return True def",
"start_y = location[\"y\"] # validate direction and times if not type(times) is int:",
"build_map(self, state): x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] =",
"x -= 1 if len(rows) <= y: return False if x < 0",
"text = f.read() f.close() data = yaml.load(text) if data == None: return False",
"x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] = \"@\" if",
"= pview[\"narration\"] return narration def look(self, state): seen = \"\\n\".join(self.items.keys()) if self.level not",
"state, char): if self.level not in state[\"seen\"]: return state, None items = self.items.values()",
"for pview in self.views.values(): if pview[\"x\"] == x and pview[\"y\"] == y: narration",
"class Scene(): def __init__(self, path, state): scene_file = state[\"current_scene\"] self.load(path, scene_file) def load(self,",
"= self.reassemble_map(grid) return reassembled def valid_move(self, location, direction, times): start_x = location[\"x\"] start_y",
"if not direction in \"nsew\": return False # find new postion x =",
"== \"w\": x -= 1 if len(rows) <= y: return False if x",
"location[\"x\"] y = location[\"y\"] narration = None for pview in self.views.values(): if pview[\"x\"]",
"describe(self, state, char): if self.level not in state[\"seen\"]: return state, None items =",
"pview[\"x\"] == x and pview[\"y\"] == y: narration = pview[\"narration\"] return narration def",
"if direction == \"n\": y += 1 elif direction == \"s\": y -=",
"direction in \"nsew\": return False # find new postion x = start_x y",
"reassemble_map(self, grid): rows = [] for row in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows)))",
"= \"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def describe(self,",
"False if x < 0 or y < 0: return False tiles =",
"load(self): f = open(self.filename(), 'r') text = f.read() f.close() data = yaml.load(text) if",
"self.title = None self.scene = None self.synopsis = None def load(self): f =",
"location[\"x\"] start_y = location[\"y\"] # validate direction and times if not type(times) is",
"100: return False if len(direction) > 1: return False if not direction in",
"1 elif direction == \"e\": x += 1 elif direction == \"w\": x",
"y: narration = pview[\"narration\"] return narration def look(self, state): seen = \"\\n\".join(self.items.keys()) if",
"elif direction == \"e\": x += 1 elif direction == \"w\": x -=",
"self.disassemble_map() grid[y][x] = \"@\" if self.level in state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]]",
"= state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] = \"@\" if self.level in state[\"seen\"]: for",
"self.__dict__[\"scene_map\"].strip(\"\\n\") return True def filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows",
"= list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0, times): if direction == \"n\": y",
"rows = [] for row in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled",
"return False if not type(direction) is str: return False if times < 1",
"import join import yaml class Story: def __init__(self, path): self.path = path self.author",
"return narration def look(self, state): seen = \"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]:",
"<= y: return False if x < 0 or y < 0: return",
"= [] for row in rows: disassembled.append(list(row)) return disassembled def reassemble_map(self, grid): rows",
"def __init__(self, path, state): scene_file = state[\"current_scene\"] self.load(path, scene_file) def load(self, path, scene_file):",
"return reassembled def build_map(self, state): x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid =",
"f = open(self.filename(), 'r') text = f.read() f.close() data = yaml.load(text) if data",
"reassembled def build_map(self, state): x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid = self.disassemble_map()",
"self.reassemble_map(grid) return reassembled def valid_move(self, location, direction, times): start_x = location[\"x\"] start_y =",
"elif direction == \"s\": y -= 1 elif direction == \"e\": x +=",
"times): start_x = location[\"x\"] start_y = location[\"y\"] # validate direction and times if",
"x += 1 elif direction == \"w\": x -= 1 if len(rows) <=",
"postion x = start_x y = start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in",
"self.synopsis = None def load(self): f = open(self.filename(), 'r') text = f.read() f.close()",
"= location[\"y\"] # validate direction and times if not type(times) is int: return",
"char): if self.level not in state[\"seen\"]: return state, None items = self.items.values() item_list",
"not type(direction) is str: return False if times < 1 or times >",
"= \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state): x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"]",
"row in grid: rows.append(''.join(row)) reassembled = \"\\n\".join(list(reversed(rows))) return reassembled def build_map(self, state): x",
"len(tiles) <= x: return False if tiles[x] != \"#\": return False return True",
"return True def filename(self): return join(self.path, \"story.yaml\") class Scene(): def __init__(self, path, state):",
"seen = \"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return state, seen def",
"path, state): scene_file = state[\"current_scene\"] self.load(path, scene_file) def load(self, path, scene_file): f =",
"= open(self.filename(path, scene_file), 'r') text = f.read() f.close() data = yaml.load(text) if data",
"= data return True def filename(self): return join(self.path, \"story.yaml\") class Scene(): def __init__(self,",
"= list(reversed(self.scene_map.split(\"\\n\"))) disassembled = [] for row in rows: disassembled.append(list(row)) return disassembled def",
"disassembled = [] for row in rows: disassembled.append(list(row)) return disassembled def reassemble_map(self, grid):",
"= state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] = \"@\" if self.level",
"y: return False if x < 0 or y < 0: return False",
"and pview[\"y\"] == y: narration = pview[\"narration\"] return narration def look(self, state): seen",
"self.author = None self.title = None self.scene = None self.synopsis = None def",
"if x < 0 or y < 0: return False tiles = list(rows[y])",
"is int: return False if not type(direction) is str: return False if times",
"return True def filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows =",
"\"#\": return False return True def view(self, location): x = location[\"x\"] y =",
"row in rows: disassembled.append(list(row)) return disassembled def reassemble_map(self, grid): rows = [] for",
"scene_file = state[\"current_scene\"] self.load(path, scene_file) def load(self, path, scene_file): f = open(self.filename(path, scene_file),",
"= \"@\" if self.level in state[\"seen\"]: for item in self.items.values(): grid[item[\"y\"]][item[\"x\"]] = item[\"char\"]",
"True def filename(self): return join(self.path, \"story.yaml\") class Scene(): def __init__(self, path, state): scene_file",
"type(direction) is str: return False if times < 1 or times > 100:",
"look(self, state): seen = \"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return state,",
"int: return False if not type(direction) is str: return False if times <",
"location, direction, times): start_x = location[\"x\"] start_y = location[\"y\"] # validate direction and",
"yaml.load(text) if data == None: return False else: self.__dict__ = data return True",
"False if len(direction) > 1: return False if not direction in \"nsew\": return",
"False if times < 1 or times > 100: return False if len(direction)",
"x: return False if tiles[x] != \"#\": return False return True def view(self,",
"0: return False tiles = list(rows[y]) if len(tiles) <= x: return False if",
"return True def view(self, location): x = location[\"x\"] y = location[\"y\"] narration =",
"yaml class Story: def __init__(self, path): self.path = path self.author = None self.title",
"len(direction) > 1: return False if not direction in \"nsew\": return False #",
"None: return False else: self.__dict__ = data self.__dict__[\"scene_map\"] = self.__dict__[\"scene_map\"].strip(\"\\n\") return True def",
"def build_map(self, state): x = state[\"location\"][\"x\"] y = state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x]",
"return join(self.path, \"story.yaml\") class Scene(): def __init__(self, path, state): scene_file = state[\"current_scene\"] self.load(path,",
"\"nsew\": return False # find new postion x = start_x y = start_y",
"return False return True def view(self, location): x = location[\"x\"] y = location[\"y\"]",
"if tiles[x] != \"#\": return False return True def view(self, location): x =",
"list(rows[y]) if len(tiles) <= x: return False if tiles[x] != \"#\": return False",
"class Story: def __init__(self, path): self.path = path self.author = None self.title =",
"def filename(self): return join(self.path, \"story.yaml\") class Scene(): def __init__(self, path, state): scene_file =",
"self.path = path self.author = None self.title = None self.scene = None self.synopsis",
"filename(self): return join(self.path, \"story.yaml\") class Scene(): def __init__(self, path, state): scene_file = state[\"current_scene\"]",
"x < 0 or y < 0: return False tiles = list(rows[y]) if",
"def look(self, state): seen = \"\\n\".join(self.items.keys()) if self.level not in state[\"seen\"]: state[\"seen\"].append(self.level) return",
"return False # find new postion x = start_x y = start_y rows",
"\"w\": x -= 1 if len(rows) <= y: return False if x <",
"None items = self.items.values() item_list = list(filter(lambda x: x[\"char\"] == char, items)) if",
"y -= 1 elif direction == \"e\": x += 1 elif direction ==",
"self.views.values(): if pview[\"x\"] == x and pview[\"y\"] == y: narration = pview[\"narration\"] return",
"path, scene_file): f = open(self.filename(path, scene_file), 'r') text = f.read() f.close() data =",
"and times if not type(times) is int: return False if not type(direction) is",
"def filename(self, path, scene_file): return join(path, \"{0}.yaml\".format(scene_file)) def disassemble_map(self): rows = list(reversed(self.scene_map.split(\"\\n\"))) disassembled",
"state[\"location\"][\"y\"] grid = self.disassemble_map() grid[y][x] = \"@\" if self.level in state[\"seen\"]: for item",
"= start_y rows = list(reversed(self.scene_map.split(\"\\n\"))) for i in range (0, times): if direction",
"False return True def view(self, location): x = location[\"x\"] y = location[\"y\"] narration",
"self.level not in state[\"seen\"]: return state, None items = self.items.values() item_list = list(filter(lambda"
"reach this route ... table_name = \"my_table\" # Name of the table to",
"cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key)",
"classes. For example: >>> class MyView(APIView, DBMixin): ... name = \"my_view\" # Flask",
"using the attributes set at class-level. This class is intended to be mixed",
"(used to get a handle for the database object) :param blueprint: Current Flask",
"flask import Blueprint from rethinkdb.ast import Table from _weakref import ref from pysite.database",
"\"\"\" Set up the view by creating the table specified by the class",
"\"id\" # type: str @classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\"",
"= \"username\" # Primary key to set for this table This class will",
"# type: str table_primary_key = \"id\" # type: str @classmethod def setup(cls: \"DBMixin\",",
"table_name = \"my_table\" # Name of the table to create ... table_primary_key =",
"manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up the view by creating the table",
"not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name,",
"at class-level. This class is intended to be mixed in alongside one of",
"This class will also work with Websockets: >>> class MyWebsocket(WS, DBMixin): ... name",
"one of the other view classes. For example: >>> class MyView(APIView, DBMixin): ...",
"alongside one of the other view classes. For example: >>> class MyView(APIView, DBMixin):",
"MyWebsocket(WS, DBMixin): ... name = \"my_websocket\" ... path = \"/my_websocket\" ... table_name =",
"For example: >>> class MyView(APIView, DBMixin): ... name = \"my_view\" # Flask internal",
"... table_name = \"my_table\" ... table_primary_key = \"username\" You may omit `table_primary_key` and",
"Current Flask blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma: no cover",
"specified primary key using the attributes set at class-level. This class is intended",
"... table_name = \"my_table\" # Name of the table to create ... table_primary_key",
"specified by the class attributes - this will also deal with multiple inheritance",
"key to set for this table This class will also work with Websockets:",
"# type: str @classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set",
"default column - \"id\". \"\"\" table_name = \"\" # type: str table_primary_key =",
"creating the table specified by the class attributes - this will also deal",
"deal with multiple inheritance by calling `super().setup()` as appropriate. :param manager: Instance of",
"`table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) -> Table: return self.db.query(self.table_name)",
"super().setup(manager, blueprint) # pragma: no cover if not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin",
"@classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up the view",
"set for this table This class will also work with Websockets: >>> class",
"table specified by the class attributes - this will also deal with multiple",
"def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up the view by",
"this table This class will also work with Websockets: >>> class MyWebsocket(WS, DBMixin):",
"this will also deal with multiple inheritance by calling `super().setup()` as appropriate. :param",
"database object) :param blueprint: Current Flask blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint)",
"\"setup\"): super().setup(manager, blueprint) # pragma: no cover if not cls.table_name: raise RuntimeError(\"Routes using",
"raise RuntimeError(\"Routes using DBViewMixin must define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property",
"table_primary_key = \"username\" # Primary key to set for this table This class",
"table to create ... table_primary_key = \"username\" # Primary key to set for",
"# Flask internal name for this route ... path = \"/my_view\" # Actual",
"from rethinkdb.ast import Table from _weakref import ref from pysite.database import RethinkDB class",
"with Websockets: >>> class MyWebsocket(WS, DBMixin): ... name = \"my_websocket\" ... path =",
"manager: Instance of the current RouteManager (used to get a handle for the",
"= \"/my_websocket\" ... table_name = \"my_table\" ... table_primary_key = \"username\" You may omit",
"for classes that make use of RethinkDB. It can automatically create a table",
"the other view classes. For example: >>> class MyView(APIView, DBMixin): ... name =",
"from pysite.database import RethinkDB class DBMixin(): \"\"\" Mixin for classes that make use",
"class-level. This class is intended to be mixed in alongside one of the",
"= \"id\" # type: str @classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint):",
"\"/my_view\" # Actual URL path to reach this route ... table_name = \"my_table\"",
"multiple inheritance by calling `super().setup()` as appropriate. :param manager: Instance of the current",
"Flask blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma: no cover if",
"\"username\" # Primary key to set for this table This class will also",
"- \"id\". \"\"\" table_name = \"\" # type: str table_primary_key = \"id\" #",
"<reponame>schwartzadev/site<filename>pysite/mixins.py # coding=utf-8 from flask import Blueprint from rethinkdb.ast import Table from _weakref",
"up the view by creating the table specified by the class attributes -",
"coding=utf-8 from flask import Blueprint from rethinkdb.ast import Table from _weakref import ref",
"by creating the table specified by the class attributes - this will also",
"by calling `super().setup()` as appropriate. :param manager: Instance of the current RouteManager (used",
"\"/my_websocket\" ... table_name = \"my_table\" ... table_primary_key = \"username\" You may omit `table_primary_key`",
"Mixin for classes that make use of RethinkDB. It can automatically create a",
"Websockets: >>> class MyWebsocket(WS, DBMixin): ... name = \"my_websocket\" ... path = \"/my_websocket\"",
"route ... path = \"/my_view\" # Actual URL path to reach this route",
"be defaulted to RethinkDB's default column - \"id\". \"\"\" table_name = \"\" #",
"# Primary key to set for this table This class will also work",
"path = \"/my_view\" # Actual URL path to reach this route ... table_name",
"Instance of the current RouteManager (used to get a handle for the database",
"ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) -> Table: return self.db.query(self.table_name) @property def db(self)",
"= \"my_table\" # Name of the table to create ... table_primary_key = \"username\"",
"class MyWebsocket(WS, DBMixin): ... name = \"my_websocket\" ... path = \"/my_websocket\" ... table_name",
"cover if not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must define `table_name`\") cls._db =",
"... path = \"/my_view\" # Actual URL path to reach this route ...",
"of the current RouteManager (used to get a handle for the database object)",
"mixed in alongside one of the other view classes. For example: >>> class",
"table_primary_key = \"username\" You may omit `table_primary_key` and it will be defaulted to",
"view classes. For example: >>> class MyView(APIView, DBMixin): ... name = \"my_view\" #",
"for this table This class will also work with Websockets: >>> class MyWebsocket(WS,",
"view by creating the table specified by the class attributes - this will",
"the table to create ... table_primary_key = \"username\" # Primary key to set",
"by the class attributes - this will also deal with multiple inheritance by",
"blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma: no cover if not",
"table_name = \"my_table\" ... table_primary_key = \"username\" You may omit `table_primary_key` and it",
"defaulted to RethinkDB's default column - \"id\". \"\"\" table_name = \"\" # type:",
"inheritance by calling `super().setup()` as appropriate. :param manager: Instance of the current RouteManager",
"class attributes - this will also deal with multiple inheritance by calling `super().setup()`",
"... table_primary_key = \"username\" # Primary key to set for this table This",
"\"\" # type: str table_primary_key = \"id\" # type: str @classmethod def setup(cls:",
"# coding=utf-8 from flask import Blueprint from rethinkdb.ast import Table from _weakref import",
"= \"\" # type: str table_primary_key = \"id\" # type: str @classmethod def",
"pragma: no cover if not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must define `table_name`\")",
"import Table from _weakref import ref from pysite.database import RethinkDB class DBMixin(): \"\"\"",
"the table specified by the class attributes - this will also deal with",
"make use of RethinkDB. It can automatically create a table with the specified",
"= \"my_websocket\" ... path = \"/my_websocket\" ... table_name = \"my_table\" ... table_primary_key =",
"if hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma: no cover if not cls.table_name: raise",
"Flask internal name for this route ... path = \"/my_view\" # Actual URL",
"to set for this table This class will also work with Websockets: >>>",
"object) :param blueprint: Current Flask blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint) #",
"def table(self) -> Table: return self.db.query(self.table_name) @property def db(self) -> RethinkDB: return self._db()",
"blueprint: Blueprint): \"\"\" Set up the view by creating the table specified by",
"... name = \"my_view\" # Flask internal name for this route ... path",
"classes that make use of RethinkDB. It can automatically create a table with",
"name = \"my_view\" # Flask internal name for this route ... path =",
"primary_key=cls.table_primary_key) @property def table(self) -> Table: return self.db.query(self.table_name) @property def db(self) -> RethinkDB:",
"that make use of RethinkDB. It can automatically create a table with the",
"route ... table_name = \"my_table\" # Name of the table to create ...",
"of the table to create ... table_primary_key = \"username\" # Primary key to",
"DBMixin(): \"\"\" Mixin for classes that make use of RethinkDB. It can automatically",
"create ... table_primary_key = \"username\" # Primary key to set for this table",
"name = \"my_websocket\" ... path = \"/my_websocket\" ... table_name = \"my_table\" ... table_primary_key",
"if not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must define `table_name`\") cls._db = ref(manager.db)",
"rethinkdb.ast import Table from _weakref import ref from pysite.database import RethinkDB class DBMixin():",
"\"my_view\" # Flask internal name for this route ... path = \"/my_view\" #",
"blueprint: Current Flask blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma: no",
"internal name for this route ... path = \"/my_view\" # Actual URL path",
"for the database object) :param blueprint: Current Flask blueprint \"\"\" if hasattr(super(), \"setup\"):",
"# Actual URL path to reach this route ... table_name = \"my_table\" #",
"Table from _weakref import ref from pysite.database import RethinkDB class DBMixin(): \"\"\" Mixin",
"DBViewMixin must define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) ->",
"example: >>> class MyView(APIView, DBMixin): ... name = \"my_view\" # Flask internal name",
"current RouteManager (used to get a handle for the database object) :param blueprint:",
"\"\"\" table_name = \"\" # type: str table_primary_key = \"id\" # type: str",
"this route ... path = \"/my_view\" # Actual URL path to reach this",
"table_name = \"\" # type: str table_primary_key = \"id\" # type: str @classmethod",
"ref from pysite.database import RethinkDB class DBMixin(): \"\"\" Mixin for classes that make",
"Blueprint from rethinkdb.ast import Table from _weakref import ref from pysite.database import RethinkDB",
"You may omit `table_primary_key` and it will be defaulted to RethinkDB's default column",
"\"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up the view by creating the",
"automatically create a table with the specified primary key using the attributes set",
"will also deal with multiple inheritance by calling `super().setup()` as appropriate. :param manager:",
"\"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up the view by creating the table specified",
"- this will also deal with multiple inheritance by calling `super().setup()` as appropriate.",
"omit `table_primary_key` and it will be defaulted to RethinkDB's default column - \"id\".",
"name for this route ... path = \"/my_view\" # Actual URL path to",
"type: str @classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up",
"str table_primary_key = \"id\" # type: str @classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\",",
"RethinkDB class DBMixin(): \"\"\" Mixin for classes that make use of RethinkDB. It",
"class will also work with Websockets: >>> class MyWebsocket(WS, DBMixin): ... name =",
"column - \"id\". \"\"\" table_name = \"\" # type: str table_primary_key = \"id\"",
"also work with Websockets: >>> class MyWebsocket(WS, DBMixin): ... name = \"my_websocket\" ...",
"\"my_websocket\" ... path = \"/my_websocket\" ... table_name = \"my_table\" ... table_primary_key = \"username\"",
"to RethinkDB's default column - \"id\". \"\"\" table_name = \"\" # type: str",
"will be defaulted to RethinkDB's default column - \"id\". \"\"\" table_name = \"\"",
"a handle for the database object) :param blueprint: Current Flask blueprint \"\"\" if",
"\"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma: no cover if not cls.table_name:",
"the database object) :param blueprint: Current Flask blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager,",
"table with the specified primary key using the attributes set at class-level. This",
"may omit `table_primary_key` and it will be defaulted to RethinkDB's default column -",
"Name of the table to create ... table_primary_key = \"username\" # Primary key",
"attributes set at class-level. This class is intended to be mixed in alongside",
"class MyView(APIView, DBMixin): ... name = \"my_view\" # Flask internal name for this",
"DBMixin): ... name = \"my_websocket\" ... path = \"/my_websocket\" ... table_name = \"my_table\"",
"It can automatically create a table with the specified primary key using the",
"to create ... table_primary_key = \"username\" # Primary key to set for this",
"the class attributes - this will also deal with multiple inheritance by calling",
"# Name of the table to create ... table_primary_key = \"username\" # Primary",
"calling `super().setup()` as appropriate. :param manager: Instance of the current RouteManager (used to",
"\"my_table\" ... table_primary_key = \"username\" You may omit `table_primary_key` and it will be",
"manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) -> Table: return self.db.query(self.table_name) @property def db(self) ->",
"get a handle for the database object) :param blueprint: Current Flask blueprint \"\"\"",
"This class is intended to be mixed in alongside one of the other",
"can automatically create a table with the specified primary key using the attributes",
"with the specified primary key using the attributes set at class-level. This class",
"= \"my_view\" # Flask internal name for this route ... path = \"/my_view\"",
"cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) -> Table: return self.db.query(self.table_name) @property",
"Primary key to set for this table This class will also work with",
"as appropriate. :param manager: Instance of the current RouteManager (used to get a",
"use of RethinkDB. It can automatically create a table with the specified primary",
"= \"username\" You may omit `table_primary_key` and it will be defaulted to RethinkDB's",
"a table with the specified primary key using the attributes set at class-level.",
"table This class will also work with Websockets: >>> class MyWebsocket(WS, DBMixin): ...",
"Actual URL path to reach this route ... table_name = \"my_table\" # Name",
"import RethinkDB class DBMixin(): \"\"\" Mixin for classes that make use of RethinkDB.",
"\"my_table\" # Name of the table to create ... table_primary_key = \"username\" #",
"in alongside one of the other view classes. For example: >>> class MyView(APIView,",
"MyView(APIView, DBMixin): ... name = \"my_view\" # Flask internal name for this route",
"blueprint) # pragma: no cover if not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must",
"... table_primary_key = \"username\" You may omit `table_primary_key` and it will be defaulted",
"RethinkDB's default column - \"id\". \"\"\" table_name = \"\" # type: str table_primary_key",
"is intended to be mixed in alongside one of the other view classes.",
"be mixed in alongside one of the other view classes. For example: >>>",
"using DBViewMixin must define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self)",
"RouteManager (used to get a handle for the database object) :param blueprint: Current",
"with multiple inheritance by calling `super().setup()` as appropriate. :param manager: Instance of the",
"= \"my_table\" ... table_primary_key = \"username\" You may omit `table_primary_key` and it will",
"RuntimeError(\"Routes using DBViewMixin must define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def",
"work with Websockets: >>> class MyWebsocket(WS, DBMixin): ... name = \"my_websocket\" ... path",
"from _weakref import ref from pysite.database import RethinkDB class DBMixin(): \"\"\" Mixin for",
"\"\"\" Mixin for classes that make use of RethinkDB. It can automatically create",
"hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma: no cover if not cls.table_name: raise RuntimeError(\"Routes",
"of the other view classes. For example: >>> class MyView(APIView, DBMixin): ... name",
"pysite.database import RethinkDB class DBMixin(): \"\"\" Mixin for classes that make use of",
"of RethinkDB. It can automatically create a table with the specified primary key",
"primary key using the attributes set at class-level. This class is intended to",
"DBMixin): ... name = \"my_view\" # Flask internal name for this route ...",
"`super().setup()` as appropriate. :param manager: Instance of the current RouteManager (used to get",
":param blueprint: Current Flask blueprint \"\"\" if hasattr(super(), \"setup\"): super().setup(manager, blueprint) # pragma:",
"will also work with Websockets: >>> class MyWebsocket(WS, DBMixin): ... name = \"my_websocket\"",
"no cover if not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must define `table_name`\") cls._db",
"define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) -> Table: return",
"this route ... table_name = \"my_table\" # Name of the table to create",
"= \"/my_view\" # Actual URL path to reach this route ... table_name =",
"also deal with multiple inheritance by calling `super().setup()` as appropriate. :param manager: Instance",
">>> class MyWebsocket(WS, DBMixin): ... name = \"my_websocket\" ... path = \"/my_websocket\" ...",
"`table_primary_key` and it will be defaulted to RethinkDB's default column - \"id\". \"\"\"",
"key using the attributes set at class-level. This class is intended to be",
">>> class MyView(APIView, DBMixin): ... name = \"my_view\" # Flask internal name for",
"URL path to reach this route ... table_name = \"my_table\" # Name of",
"Set up the view by creating the table specified by the class attributes",
"path = \"/my_websocket\" ... table_name = \"my_table\" ... table_primary_key = \"username\" You may",
"path to reach this route ... table_name = \"my_table\" # Name of the",
"table_primary_key = \"id\" # type: str @classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint:",
"Blueprint): \"\"\" Set up the view by creating the table specified by the",
"import ref from pysite.database import RethinkDB class DBMixin(): \"\"\" Mixin for classes that",
"@property def table(self) -> Table: return self.db.query(self.table_name) @property def db(self) -> RethinkDB: return",
"handle for the database object) :param blueprint: Current Flask blueprint \"\"\" if hasattr(super(),",
"RethinkDB. It can automatically create a table with the specified primary key using",
"class is intended to be mixed in alongside one of the other view",
"other view classes. For example: >>> class MyView(APIView, DBMixin): ... name = \"my_view\"",
"\"id\". \"\"\" table_name = \"\" # type: str table_primary_key = \"id\" # type:",
"setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up the view by creating",
"_weakref import ref from pysite.database import RethinkDB class DBMixin(): \"\"\" Mixin for classes",
":param manager: Instance of the current RouteManager (used to get a handle for",
"for this route ... path = \"/my_view\" # Actual URL path to reach",
"= ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) -> Table: return self.db.query(self.table_name) @property def",
"and it will be defaulted to RethinkDB's default column - \"id\". \"\"\" table_name",
"the view by creating the table specified by the class attributes - this",
"the specified primary key using the attributes set at class-level. This class is",
"... name = \"my_websocket\" ... path = \"/my_websocket\" ... table_name = \"my_table\" ...",
"create a table with the specified primary key using the attributes set at",
"to get a handle for the database object) :param blueprint: Current Flask blueprint",
"... path = \"/my_websocket\" ... table_name = \"my_table\" ... table_primary_key = \"username\" You",
"import Blueprint from rethinkdb.ast import Table from _weakref import ref from pysite.database import",
"the attributes set at class-level. This class is intended to be mixed in",
"intended to be mixed in alongside one of the other view classes. For",
"set at class-level. This class is intended to be mixed in alongside one",
"type: str table_primary_key = \"id\" # type: str @classmethod def setup(cls: \"DBMixin\", manager:",
"to reach this route ... table_name = \"my_table\" # Name of the table",
"class DBMixin(): \"\"\" Mixin for classes that make use of RethinkDB. It can",
"it will be defaulted to RethinkDB's default column - \"id\". \"\"\" table_name =",
"str @classmethod def setup(cls: \"DBMixin\", manager: \"pysite.route_manager.RouteManager\", blueprint: Blueprint): \"\"\" Set up the",
"\"username\" You may omit `table_primary_key` and it will be defaulted to RethinkDB's default",
"attributes - this will also deal with multiple inheritance by calling `super().setup()` as",
"# pragma: no cover if not cls.table_name: raise RuntimeError(\"Routes using DBViewMixin must define",
"the current RouteManager (used to get a handle for the database object) :param",
"from flask import Blueprint from rethinkdb.ast import Table from _weakref import ref from",
"must define `table_name`\") cls._db = ref(manager.db) manager.db.create_table(cls.table_name, primary_key=cls.table_primary_key) @property def table(self) -> Table:",
"to be mixed in alongside one of the other view classes. For example:",
"appropriate. :param manager: Instance of the current RouteManager (used to get a handle"
] |
[
"= HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API to get an",
"def main(): \"\"\"Main function. Get analyses or one analysis by ID. \"\"\" parser",
"Example usages: python get_analysis.py --id 1802 python get_analysis.py \"\"\" import os import requests",
"= \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\" url = url.format(FABRIC_API_URL,",
"os import requests from requests.auth import HTTPBasicAuth import sys import simplejson as json",
"type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args() analysis_id = args.id genome_id = args.genome_id",
"parameters if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\" not",
"parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args() analysis_id = args.id genome_id",
"url = \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if genome_id: url = '{}?genome_id={}'.format(url, genome_id)",
"= os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia",
"if genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url, auth=auth) return result.json()",
"sys import simplejson as json import argparse # Load environment variables for request",
"\"\"\" # Construct request if analysis_id: url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id)",
"metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args() analysis_id = args.id genome_id =",
"API to get an analysis \"\"\" # Construct request if analysis_id: url =",
"all analyses in the workspace. Example usages: python get_analysis.py --id 1802 python get_analysis.py",
"Omicia API to get an analysis \"\"\" # Construct request if analysis_id: url",
"environment variable missing\") if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN",
"by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report') parser.add_argument('--id',",
"get_analysis.py \"\"\" import os import requests from requests.auth import HTTPBasicAuth import sys import",
"parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id',",
"genome_id=None): \"\"\"Use the Omicia API to get an analysis \"\"\" # Construct request",
"import argparse # Load environment variables for request authentication parameters if \"FABRIC_API_PASSWORD\" not",
"= os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)",
"args = parser.parse_args() analysis_id = args.id genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id)",
"FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN,",
"\"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD =",
"or all analyses in the workspace. Example usages: python get_analysis.py --id 1802 python",
"\"\"\"Main function. Get analyses or one analysis by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch",
"'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API to",
"url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url, auth=auth) return result.json() def main():",
"in the workspace. Example usages: python get_analysis.py --id 1802 python get_analysis.py \"\"\" import",
"\"\"\"Use the Omicia API to get an analysis \"\"\" # Construct request if",
"requests.get(url, auth=auth) return result.json() def main(): \"\"\"Main function. Get analyses or one analysis",
"requests.auth import HTTPBasicAuth import sys import simplejson as json import argparse # Load",
"os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def",
"# Construct request if analysis_id: url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else:",
"analysis by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report')",
"= args.id genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id) sys.stdout.write(json.dumps(json_response, indent=4)) if __name__",
"os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API",
"the Omicia API to get an analysis \"\"\" # Construct request if analysis_id:",
"argparse # Load environment variables for request authentication parameters if \"FABRIC_API_PASSWORD\" not in",
"variables for request authentication parameters if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable",
"missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth =",
"workspace. Example usages: python get_analysis.py --id 1802 python get_analysis.py \"\"\" import os import",
"analysis_id) else: url = \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if genome_id: url =",
"VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args()",
"else: url = \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if genome_id: url = '{}?genome_id={}'.format(url,",
"os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL =",
"url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\" url =",
"Construct request if analysis_id: url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else: url",
"url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if genome_id: url",
"analyses or one analysis by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST",
"ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id',",
"= parser.parse_args() analysis_id = args.id genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id) sys.stdout.write(json.dumps(json_response,",
"environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')",
"to get an analysis \"\"\" # Construct request if analysis_id: url = \"{}/analysis/{}/\"",
"auth=auth) return result.json() def main(): \"\"\"Main function. Get analyses or one analysis by",
"\"\"\"Get an analysis, or all analyses in the workspace. Example usages: python get_analysis.py",
"result = requests.get(url, auth=auth) return result.json() def main(): \"\"\"Main function. Get analyses or",
"HTTPBasicAuth import sys import simplejson as json import argparse # Load environment variables",
"simplejson as json import argparse # Load environment variables for request authentication parameters",
"= url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if genome_id:",
"request if analysis_id: url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else: url =",
"metavar='genome_id', type=int) args = parser.parse_args() analysis_id = args.id genome_id = args.genome_id json_response =",
"sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\")",
"import os import requests from requests.auth import HTTPBasicAuth import sys import simplejson as",
"analysis_id: url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\" url",
"if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\" not in",
"an analysis \"\"\" # Construct request if analysis_id: url = \"{}/analysis/{}/\" url =",
"python get_analysis.py \"\"\" import os import requests from requests.auth import HTTPBasicAuth import sys",
"variable missing\") if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN =",
"url = url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if",
"get_analysis.py --id 1802 python get_analysis.py \"\"\" import os import requests from requests.auth import",
"authentication parameters if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\"",
"argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int)",
"genome_id) sys.stdout.flush() result = requests.get(url, auth=auth) return result.json() def main(): \"\"\"Main function. Get",
"variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth",
"if analysis_id: url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\"",
"sys.stdout.flush() result = requests.get(url, auth=auth) return result.json() def main(): \"\"\"Main function. Get analyses",
"Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args() analysis_id = args.id",
"get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API to get an analysis \"\"\" # Construct",
"= '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url, auth=auth) return result.json() def main(): \"\"\"Main",
"\"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result",
"type=int) args = parser.parse_args() analysis_id = args.id genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id,",
"os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use",
"analysis_id) if genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url, auth=auth) return",
"result.json() def main(): \"\"\"Main function. Get analyses or one analysis by ID. \"\"\"",
"function. Get analyses or one analysis by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a",
"1802 python get_analysis.py \"\"\" import os import requests from requests.auth import HTTPBasicAuth import",
"args.id genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id) sys.stdout.write(json.dumps(json_response, indent=4)) if __name__ ==",
"sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL',",
"get an analysis \"\"\" # Construct request if analysis_id: url = \"{}/analysis/{}/\" url",
"one analysis by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor",
"FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None,",
"or one analysis by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or",
"import requests from requests.auth import HTTPBasicAuth import sys import simplejson as json import",
"auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API to get",
"--id 1802 python get_analysis.py \"\"\" import os import requests from requests.auth import HTTPBasicAuth",
"requests from requests.auth import HTTPBasicAuth import sys import simplejson as json import argparse",
"'{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url, auth=auth) return result.json() def main(): \"\"\"Main function.",
"python get_analysis.py --id 1802 python get_analysis.py \"\"\" import os import requests from requests.auth",
"json import argparse # Load environment variables for request authentication parameters if \"FABRIC_API_PASSWORD\"",
"Get analyses or one analysis by ID. \"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant,",
"request authentication parameters if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if",
"from requests.auth import HTTPBasicAuth import sys import simplejson as json import argparse #",
"Load environment variables for request authentication parameters if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD",
"= argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id',",
"not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']",
"def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API to get an analysis \"\"\" #",
"genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url, auth=auth) return result.json() def",
"analyses in the workspace. Example usages: python get_analysis.py --id 1802 python get_analysis.py \"\"\"",
"= requests.get(url, auth=auth) return result.json() def main(): \"\"\"Main function. Get analyses or one",
"if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD",
"in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment",
"\"{}/analysis/{}/\" url = url.format(FABRIC_API_URL, analysis_id) else: url = \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id)",
"a Variant, VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args",
"main(): \"\"\"Main function. Get analyses or one analysis by ID. \"\"\" parser =",
"FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the",
"not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN",
"missing\") if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']",
"parser.parse_args() analysis_id = args.id genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id) sys.stdout.write(json.dumps(json_response, indent=4))",
"= args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id) sys.stdout.write(json.dumps(json_response, indent=4)) if __name__ == \"__main__\": main()",
"analysis, or all analyses in the workspace. Example usages: python get_analysis.py --id 1802",
"\"\"\" parser = argparse.ArgumentParser(description='Fetch a Variant, VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int)",
"genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id) sys.stdout.write(json.dumps(json_response, indent=4)) if __name__ == \"__main__\":",
"Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args() analysis_id =",
"import HTTPBasicAuth import sys import simplejson as json import argparse # Load environment",
"an analysis, or all analyses in the workspace. Example usages: python get_analysis.py --id",
"environment variables for request authentication parameters if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment",
"analysis \"\"\" # Construct request if analysis_id: url = \"{}/analysis/{}/\" url = url.format(FABRIC_API_URL,",
"= \"{}/analysis\" url = url.format(FABRIC_API_URL, analysis_id) if genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush()",
"in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable missing\") FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN'] FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL",
"analysis_id = args.id genome_id = args.genome_id json_response = get_analysis(analysis_id=analysis_id, genome_id=genome_id) sys.stdout.write(json.dumps(json_response, indent=4)) if",
"for request authentication parameters if \"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\")",
"= os.environ['FABRIC_API_PASSWORD'] FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com') auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None):",
"import simplejson as json import argparse # Load environment variables for request authentication",
"url = url.format(FABRIC_API_URL, analysis_id) if genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result =",
"FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API to get an analysis \"\"\"",
"or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args() analysis_id",
"Variant, VAAST or Phevor Report') parser.add_argument('--id', metavar='analysis_id', type=int) parser.add_argument('--genome_id', metavar='genome_id', type=int) args =",
"<filename>python/AnalysisLaunchers/get_analysis.py<gh_stars>1-10 \"\"\"Get an analysis, or all analyses in the workspace. Example usages: python",
"import sys import simplejson as json import argparse # Load environment variables for",
"\"\"\" import os import requests from requests.auth import HTTPBasicAuth import sys import simplejson",
"as json import argparse # Load environment variables for request authentication parameters if",
"the workspace. Example usages: python get_analysis.py --id 1802 python get_analysis.py \"\"\" import os",
"= url.format(FABRIC_API_URL, analysis_id) if genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url,",
"\"FABRIC_API_PASSWORD\" not in os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\" not in os.environ:",
"HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD) def get_analysis(analysis_id=None, genome_id=None): \"\"\"Use the Omicia API to get an analysis",
"usages: python get_analysis.py --id 1802 python get_analysis.py \"\"\" import os import requests from",
"parser.add_argument('--genome_id', metavar='genome_id', type=int) args = parser.parse_args() analysis_id = args.id genome_id = args.genome_id json_response",
"# Load environment variables for request authentication parameters if \"FABRIC_API_PASSWORD\" not in os.environ:",
"return result.json() def main(): \"\"\"Main function. Get analyses or one analysis by ID.",
"os.environ: sys.exit(\"FABRIC_API_PASSWORD environment variable missing\") if \"FABRIC_API_LOGIN\" not in os.environ: sys.exit(\"FABRIC_API_LOGIN environment variable",
"url.format(FABRIC_API_URL, analysis_id) if genome_id: url = '{}?genome_id={}'.format(url, genome_id) sys.stdout.flush() result = requests.get(url, auth=auth)"
] |
[
"configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type == \"reference\" and step",
"task\") if step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing",
"tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log general information",
"reference_losses def _get_trial_parameters(args, reference_losses, step): if step == 1 and args.runtype.type in [\"eval_dim\",",
"self, config_id, config, budget, working_directory, *args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage =",
"torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log",
"self._previous_development_stage or self._previous_task_identifier != task_identifier ) if task_changed: # Only read task once",
"------- {step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip())",
"None self._task = None # pylint: disable=unused-argument def compute( self, config_id, config, budget,",
"import random import time from pathlib import Path import hp_transfer_benchmarks # pylint: disable=unused-import",
"result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as",
"* trials_per_task})\" ) else: trials_until_loss = None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch):",
") result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read task",
") result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) +",
"0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if __name__ ==",
"result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer",
"= reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses,",
"configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step > 1: _write_batch_result(args,",
"and step == 1: continue logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters(",
"step): if step == 1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment",
"logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\")",
"def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as",
"and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using",
"logging.config import random import time from pathlib import Path import hp_transfer_benchmarks # pylint:",
"hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() # Start a background",
"nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() # Start a background worker for the",
"the master node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), )",
"hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args,",
"if step == 1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else:",
"args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id == 0: _run_master(args,",
"== 1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task =",
") else: trials_until_loss = None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row =",
") result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\")",
"run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an optimizer optimizer =",
"def _run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial delay to make sure the",
"host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an optimizer optimizer = hydra.utils.instantiate(",
"trials_until_loss = _get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer,",
"result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step,",
"task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step > 1: _write_batch_result(args, result_batch)",
"\"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\")",
"result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ):",
"step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until",
"= None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment,",
"benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if __name__ == \"__main__\": run()",
"task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\" in",
"budget, working_directory, *args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed =",
"time from pathlib import Path import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers #",
"once self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage = None self._task = None",
"args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer else None result_batch",
"hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import hydra import numpy",
"== 1: continue logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses,",
"host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False)",
"zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type == \"reference\" and step == 1: continue",
"= Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream:",
"step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs):",
"# Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), )",
"enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type == \"reference\" and step == 1:",
"None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name,",
"args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and evaluate the optimizer try:",
"def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True #",
"args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", )",
"args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group,",
"the nameserver is already running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id,",
"trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\") if step > 1 and",
"node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) #",
"import Path import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import",
"import nameserver as hpns from hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker import",
"logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args,",
"running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory))",
"args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False #",
"development_stage = kwargs[\"development_stage\"] task_changed = ( development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier",
"optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic =",
"as np import yaml from gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver as",
"nameserver.start() # Start a background worker for the master node w = _HPOWorker(",
"read task once self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage = None self._task",
"reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step): if",
"nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host,",
"as result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer, task_batch,",
"worker for the master node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port,",
"n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step > 1: _write_batch_result(args, result_batch) return",
"if do_transfer else None result_batch = result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f\"Running",
"continue logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step )",
"_write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, )",
"in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials",
"batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args,",
"reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step): if step == 1 and args.runtype.type",
"= hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id ==",
"Only read task once self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage = None",
"in batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss,",
"hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and evaluate the optimizer",
"yaml from gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core",
"): task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed = ( development_stage != self._previous_development_stage",
"_read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1",
"= kwargs[\"development_stage\"] task_changed = ( development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier )",
"self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if",
"benchmark self._previous_task_identifier = None self._previous_development_stage = None self._task = None # pylint: disable=unused-argument",
"= logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with",
"= reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step): if step == 1 and",
"random import time from pathlib import Path import hp_transfer_benchmarks # pylint: disable=unused-import import",
"= None self._previous_development_stage = None self._task = None # pylint: disable=unused-argument def compute(",
"= task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\"",
"random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) #",
"gitinfo from hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core import result as result_utils",
"make sure the nameserver is already running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker(",
"config_id, config, budget, working_directory, *args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"]",
"hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id == 0:",
"trials_until_loss = None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name,",
"benchmark, working_directory): time.sleep(5) # short artificial delay to make sure the nameserver is",
"general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit",
"host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id,",
"import hydra import numpy as np import yaml from gitinfo import gitinfo from",
"# pylint: disable=unused-import import hydra import numpy as np import yaml from gitinfo",
"hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"):",
"self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\" in config: del config[\"development_step\"] return self._task.evaluate(config) def",
"{trials_per_task} trials per task\") if step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"]",
"self._previous_task_identifier = None self._previous_development_stage = None self._task = None # pylint: disable=unused-argument def",
"# torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory = Path().cwd() #",
"return reference_losses def _get_trial_parameters(args, reference_losses, step): if step == 1 and args.runtype.type in",
"contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct",
"Start a background worker for the master node w = _HPOWorker( benchmark, run_id=args.run_id,",
"nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach,",
"benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an optimizer optimizer",
"*args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed = ( development_stage",
"# torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\",",
"logger.info(f\"Running on task {task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, )",
"= development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\" in config: del",
"= kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed = ( development_stage != self._previous_development_stage or self._previous_task_identifier",
"w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create",
"if args.runtype.type == \"reference\" and step == 1: continue logger.info(f\"Step ------- {step :04d}\")",
"reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory,",
"args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/',",
"# Log general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash:",
"= hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses",
"result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace, step,",
"optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def",
"= nameserver.start() # Start a background worker for the master node w =",
"args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if",
"self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage = None self._task = None #",
"result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer,",
"and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until loss {trials_until_loss :.4f}\"",
"w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host,",
"hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id == 0: _run_master(args, benchmark, working_directory) else: _run_worker(args,",
"disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import hydra import numpy as np import",
"read task once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier,",
"if step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials",
") ns_host, ns_port = nameserver.start() # Start a background worker for the master",
"args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\") if step >",
"run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError):",
"import Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\")",
"= args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer else None result_batch = result_utils.BatchResult(step, configspace)",
"# pylint: disable=unused-argument def compute( self, config_id, config, budget, working_directory, *args, **kwargs, ):",
"a background worker for the master node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host,",
"nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and evaluate the optimizer try: _train_and_eval(optimizer, benchmark,",
"result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult()",
"self._previous_task_identifier != task_identifier ) if task_changed: # Only read task once self._previous_task_identifier =",
"config, budget, working_directory, *args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed",
"get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path =",
"return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id,",
"Construct benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark =",
"Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value)",
"# pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import hydra import numpy as",
"\"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer",
"args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer else None result_batch = result_utils.BatchResult(step, configspace) for",
"gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core import result",
"Actually run if args.worker_id == 0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory)",
"self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\" in config: del config[\"development_step\"] return",
"disable=unused-import import hydra import numpy as np import yaml from gitinfo import gitinfo",
"= hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def",
"git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark",
"1 ): if args.runtype.type == \"reference\" and step == 1: continue logger.info(f\"Step -------",
"= Path().cwd() # Log general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info()",
"do_transfer else None result_batch = result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f\"Running on",
"self._previous_development_stage = None self._task = None # pylint: disable=unused-argument def compute( self, config_id,",
"benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark)",
"args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True)",
"args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id",
"): do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer else None result_batch =",
"gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\"",
"args.runtype.type == \"reference\" and step == 1: continue logger.info(f\"Step ------- {step :04d}\") trials_per_task,",
"!= task_identifier ) if task_changed: # Only read task once self._previous_task_identifier = task_identifier",
"benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start()",
"np import yaml from gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver as hpns",
"else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if __name__ == \"__main__\": run() # pylint:",
"class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read task once self._benchmark",
"**kwargs): super().__init__(**kwargs) # Only read task once self._benchmark = benchmark self._previous_task_identifier = None",
"= hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() # Start a",
":.4f}\" f\" (max {10 * trials_per_task})\" ) else: trials_until_loss = None return trials_per_task,",
"# Train and evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown()",
"from gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core import",
"reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step):",
"optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and",
") result_batch.insert(task_result, task) if step > 1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer,",
"args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\",",
"def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port",
"is already running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"),",
"= False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def",
"= _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory),",
"reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory,",
"= hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and evaluate the",
"ns_host, ns_port = nameserver.start() # Start a background worker for the master node",
"= result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if",
"_write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory =",
"if task_changed: # Only read task once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage",
"with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) + \"\\n\") def _run_on_task_batch(",
"None # pylint: disable=unused-argument def compute( self, config_id, config, budget, working_directory, *args, **kwargs,",
"numpy as np import yaml from gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver",
"compute( self, config_id, config, budget, working_directory, *args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage",
"import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path",
"step == 1: continue logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args,",
"kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed = ( development_stage != self._previous_development_stage or self._previous_task_identifier !=",
"development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier ) if task_changed: # Only read",
"{task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if",
") benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id == 0: _run_master(args, benchmark,",
"value in batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task,",
"logger=logging.getLogger(\"master\"), ) # Train and evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args) finally:",
"task once self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage = None self._task =",
"= args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\") if step",
"logger.info(f\"Using {trials_per_task} trials per task\") if step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss =",
"= reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step): if step",
"in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run",
"(max {10 * trials_per_task})\" ) else: trials_until_loss = None return trials_per_task, trials_until_loss def",
"from hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger",
"= get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path(",
"trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer else",
"if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream) reference_losses",
"# Only read task once self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage =",
"configspace) for task in task_batch: logger.info(f\"Running on task {task.identifier}\") task_result = optimizer.run( configspace=configspace,",
"working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() #",
"1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor",
"# tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log general",
"trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\") if",
"_HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an optimizer",
"information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date:",
"reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until loss {trials_until_loss :.4f}\" f\" (max {10 *",
"{trials_until_loss :.4f}\" f\" (max {10 * trials_per_task})\" ) else: trials_until_loss = None return",
"if args.worker_id == 0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\")",
"if step > 1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses",
"task once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage",
"step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task,",
"performing trials until loss {trials_until_loss :.4f}\" f\" (max {10 * trials_per_task})\" ) else:",
") logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss,",
"{10 * trials_per_task})\" ) else: trials_until_loss = None return trials_per_task, trials_until_loss def _write_batch_result(args,",
"step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type == \"reference\"",
"pylint: disable=unused-import import hydra import numpy as np import yaml from gitinfo import",
"None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream)",
"{git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path",
"until loss {trials_until_loss :.4f}\" f\" (max {10 * trials_per_task})\" ) else: trials_until_loss =",
"args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step,",
"Path import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import hydra",
"def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer =",
"task_changed = ( development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier ) if task_changed:",
"with Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)]",
"trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) #",
"_get_trial_parameters(args, reference_losses, step): if step == 1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task",
"logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path)",
"import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import hydra import",
"{git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path",
"development_stage ) if \"development_step\" in config: del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark,",
"Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def",
"loss {trials_until_loss :.4f}\" f\" (max {10 * trials_per_task})\" ) else: trials_until_loss = None",
"date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path(",
"self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\" in config:",
"np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed)",
"task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed = ( development_stage != self._previous_development_stage or",
"= ( development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier ) if task_changed: #",
"pylint: disable=unused-argument def compute( self, config_id, config, budget, working_directory, *args, **kwargs, ): task_identifier",
"import gitinfo from hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core import result as",
"f\" (max {10 * trials_per_task})\" ) else: trials_until_loss = None return trials_per_task, trials_until_loss",
"hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger =",
"already running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), )",
"== 0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if __name__",
"f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value in",
"> 1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args)",
"w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, )",
"logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path )",
"optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results",
"_run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if __name__ == \"__main__\": run() # pylint: disable=no-value-for-parameter",
"_run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker):",
"_run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port =",
"per task\") if step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also",
"yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def",
"hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\" in args.benchmark.benchmark:",
"from hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf import",
"# Only read task once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task =",
"result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace,",
"\"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually",
"= _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an",
"import result as result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf from",
"f\"Also performing trials until loss {trials_until_loss :.4f}\" f\" (max {10 * trials_per_task})\" )",
"result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f\"Running on task {task.identifier}\") task_result = optimizer.run(",
"(train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type == \"reference\" and",
"args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream) reference_losses =",
") w.run(background=True) # Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port,",
"result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read task once",
"an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train",
"master node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"), ) w.run(background=True)",
"once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage )",
"config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log general information logger.info(f\"Using working_directory={working_directory}\")",
"else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\") if step > 1",
"disable=unused-argument def compute( self, config_id, config, budget, working_directory, *args, **kwargs, ): task_identifier =",
"task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step",
"if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark = hydra.utils.instantiate(args.benchmark.benchmark) #",
"True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory = Path().cwd()",
"working_directory = Path().cwd() # Log general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info =",
"hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker",
"import contextlib import logging import logging.config import random import time from pathlib import",
"run if args.worker_id == 0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run",
"del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial delay",
"None result_batch = result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f\"Running on task {task.identifier}\")",
"as stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses =",
"import hp_transfer_optimizers # pylint: disable=unused-import import hydra import numpy as np import yaml",
"host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and evaluate the optimizer try: _train_and_eval(optimizer,",
"result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path",
"# short artificial delay to make sure the nameserver is already running host",
"import yaml from gitinfo import gitinfo from hp_transfer_optimizers.core import nameserver as hpns from",
"reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses",
"the optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed)",
"logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\" in",
"result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs)",
"logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using",
"return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial delay to make",
"**kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed = ( development_stage !=",
"working_directory, *args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"] development_stage = kwargs[\"development_stage\"] task_changed = (",
"hpns from hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf",
"task {task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task)",
"benchmark.configspace_trajectory), 1 ): if args.runtype.type == \"reference\" and step == 1: continue logger.info(f\"Step",
"False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args):",
"logging import logging.config import random import time from pathlib import Path import hp_transfer_benchmarks",
"== \"reference\" and step == 1: continue logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss",
"= gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if",
"',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row])",
"_train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace)",
"args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task}",
"Train and evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def",
"benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False",
"logger.info( f\"Also performing trials until loss {trials_until_loss :.4f}\" f\" (max {10 * trials_per_task})\"",
"= _get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch,",
"kwargs[\"development_stage\"] task_changed = ( development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier ) if",
"omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses",
"_train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark =",
"finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic",
"trials until loss {trials_until_loss :.4f}\" f\" (max {10 * trials_per_task})\" ) else: trials_until_loss",
"nameserver as hpns from hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker import Worker",
"nameserver is already running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark, run_id=args.run_id, host=host,",
"_HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read task once self._benchmark =",
"trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id,",
"Path().cwd() # Log general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit",
"step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if",
"# Actually run if args.worker_id == 0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark,",
"1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until loss {trials_until_loss",
"or self._previous_task_identifier != task_identifier ) if task_changed: # Only read task once self._previous_task_identifier",
") if task_changed: # Only read task once self._previous_task_identifier = task_identifier self._previous_development_stage =",
"from hp_transfer_optimizers.core import nameserver as hpns from hp_transfer_optimizers.core import result as result_utils from",
"w = _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark,",
"config: del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial",
"benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace) in",
"configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark,",
"def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read task once self._benchmark = benchmark",
"_run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if __name__ == \"__main__\":",
"on task {task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result,",
"self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial delay to make sure",
"optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step > 1:",
"nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True",
"def _get_trial_parameters(args, reference_losses, step): if step == 1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]:",
"Only read task once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task = self._benchmark.get_task_from_identifier(",
"hp_transfer_optimizers # pylint: disable=unused-import import hydra import numpy as np import yaml from",
") if \"development_step\" in config: del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory):",
"\"development_step\" in config: del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) #",
"_run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial delay to make sure the nameserver",
"trials_per_task})\" ) else: trials_until_loss = None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row",
"= self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\" in config: del config[\"development_step\"] return self._task.evaluate(config)",
"trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch(",
"train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self,",
"result as result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results",
"benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver =",
"_set_seeds(args.seed) working_directory = Path().cwd() # Log general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info",
"logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory),",
"working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() # Start a background worker for",
"_run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\")",
"from pathlib import Path import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers # pylint:",
"task in task_batch: logger.info(f\"Running on task {task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task,",
"= None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses =",
"reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step): if step == 1",
"from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args):",
"+ \"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ):",
"# Construct benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path = hydra.utils.to_absolute_path( args.benchmark.benchmark.data_path ) benchmark",
"= yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses",
"result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with",
"in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type == \"reference\" and step ==",
"= optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step >",
"trials_until_loss, args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only",
"Log general information logger.info(f\"Using working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\")",
"torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory",
"benchmark = hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id == 0: _run_master(args, benchmark, working_directory)",
"# Start a background worker for the master node w = _HPOWorker( benchmark,",
"in task_batch: logger.info(f\"Running on task {task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss,",
") w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name,",
"= None self._task = None # pylint: disable=unused-argument def compute( self, config_id, config,",
"task_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results =",
"do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer else None result_batch = result_utils.BatchResult(step,",
"logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") # Construct benchmark if \"data_path\" in args.benchmark.benchmark: args.benchmark.benchmark.data_path =",
"for value in batch_result_row]) + \"\\n\") def _run_on_task_batch( optimizer, task_batch, configspace, step, result_trajectory,",
"task) if step > 1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args):",
"logger=logging.getLogger(\"worker\"), ) w.run(background=True) # Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host,",
"as result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import",
"= True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory =",
"step == 1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task",
"= None # pylint: disable=unused-argument def compute( self, config_id, config, budget, working_directory, *args,",
"reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return",
"import time from pathlib import Path import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers",
"trials_until_loss=trials_until_loss, previous_results=previous_results, ) result_batch.insert(task_result, task) if step > 1: _write_batch_result(args, result_batch) return result_batch",
"Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) #",
"= reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until loss {trials_until_loss :.4f}\" f\" (max {10",
"= result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f\"Running on task {task.identifier}\") task_result =",
"background worker for the master node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host,",
"run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() # Start a background worker",
"configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, )",
"previous_results = result_trajectory if do_transfer else None result_batch = result_utils.BatchResult(step, configspace) for task",
"super().__init__(**kwargs) # Only read task once self._benchmark = benchmark self._previous_task_identifier = None self._previous_development_stage",
"import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses =",
"short artificial delay to make sure the nameserver is already running host =",
"1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory",
"task_changed: # Only read task once self._previous_task_identifier = task_identifier self._previous_development_stage = development_stage self._task",
"_HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver",
"evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed)",
"def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch,",
"pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import import hydra import numpy as np",
"reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args, reference_losses, step): if step ==",
"result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type",
"_set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed)",
"def run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log general information logger.info(f\"Using working_directory={working_directory}\") with",
"= _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result) class",
"config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) # short artificial delay to",
"result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row",
"= hydra.utils.instantiate(args.benchmark.benchmark) # Actually run if args.worker_id == 0: _run_master(args, benchmark, working_directory) else:",
"ns_port = nameserver.start() # Start a background worker for the master node w",
"result_batch = result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f\"Running on task {task.identifier}\") task_result",
"trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory if do_transfer else None",
"): if args.runtype.type == \"reference\" and step == 1: continue logger.info(f\"Step ------- {step",
"{step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result",
"for step, (train_batch, configspace) in enumerate( zip(benchmark.dev_trajectory, benchmark.configspace_trajectory), 1 ): if args.runtype.type ==",
"trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until loss {trials_until_loss :.4f}\" f\" (max",
"and evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed):",
"reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)] return reference_losses def _get_trial_parameters(args,",
"self._task = None # pylint: disable=unused-argument def compute( self, config_id, config, budget, working_directory,",
"result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) + \"\\n\")",
"torch.backends.cudnn.benchmark = False # torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\")",
"task_identifier ) if task_changed: # Only read task once self._previous_task_identifier = task_identifier self._previous_development_stage",
"time.sleep(5) # short artificial delay to make sure the nameserver is already running",
"args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch, configspace) in enumerate(",
"args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value",
"@hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed) working_directory = Path().cwd() # Log general information logger.info(f\"Using",
"configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ): do_transfer = args.approach.name.startswith(\"transfer\") previous_results = result_trajectory",
"( development_stage != self._previous_development_stage or self._previous_task_identifier != task_identifier ) if task_changed: # Only",
"1: continue logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step",
"= benchmark self._previous_task_identifier = None self._previous_development_stage = None self._task = None # pylint:",
") # Train and evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True)",
"artificial delay to make sure the nameserver is already running host = hpns.nic_name_to_host(args.nic_name)",
"else: trials_until_loss = None return trials_per_task, trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row(",
"args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until loss {trials_until_loss :.4f}\" f\"",
"[\"eval_dim\", \"eval_reference\"]: trials_per_task = args.runtype.dim_factor_pre_adjustment else: trials_per_task = args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per",
"args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True,",
"for the master node w = _HPOWorker( benchmark, run_id=args.run_id, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"worker\"),",
"parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for value in batch_result_row]) + \"\\n\") def",
"if \"development_step\" in config: del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5)",
"_read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream:",
"from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None if",
"working_directory): time.sleep(5) # short artificial delay to make sure the nameserver is already",
"trials_until_loss def _write_batch_result(args, result_batch): batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id,",
"_get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result = _run_on_task_batch( optimizer, train_batch, configspace,",
"reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name]",
"pathlib import Path import hp_transfer_benchmarks # pylint: disable=unused-import import hp_transfer_optimizers # pylint: disable=unused-import",
"with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\") #",
"args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\") if step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss",
"return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for",
"result_trajectory if do_transfer else None result_batch = result_utils.BatchResult(step, configspace) for task in task_batch:",
"hydra import numpy as np import yaml from gitinfo import gitinfo from hp_transfer_optimizers.core",
"working_directory={working_directory}\") with contextlib.suppress(TypeError): git_info = gitinfo.get_git_info() logger.info(f\"Commit hash: {git_info['commit']}\") logger.info(f\"Commit date: {git_info['author_date']}\") logger.info(f\"Arguments:\\n{OmegaConf.to_yaml(args)}\")",
"hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker import Worker from omegaconf import OmegaConf",
"reference_losses, step): if step == 1 and args.runtype.type in [\"eval_dim\", \"eval_reference\"]: trials_per_task =",
"run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory): nameserver = hpns.NameServer(",
"task_identifier, development_stage ) if \"development_step\" in config: del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args,",
"batch_result_row = get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path =",
"in config: del config[\"development_step\"] return self._task.evaluate(config) def _run_worker(args, benchmark, working_directory): time.sleep(5) # short",
"__init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read task once self._benchmark = benchmark self._previous_task_identifier",
"try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) # torch.backends.cudnn.benchmark",
"benchmark, **kwargs): super().__init__(**kwargs) # Only read task once self._benchmark = benchmark self._previous_task_identifier =",
"development_stage self._task = self._benchmark.get_task_from_identifier( task_identifier, development_stage ) if \"development_step\" in config: del config[\"development_step\"]",
"None self._previous_development_stage = None self._task = None # pylint: disable=unused-argument def compute( self,",
"else None result_batch = result_utils.BatchResult(step, configspace) for task in task_batch: logger.info(f\"Running on task",
"def compute( self, config_id, config, budget, working_directory, *args, **kwargs, ): task_identifier = kwargs[\"task_identifier\"]",
"contextlib import logging import logging.config import random import time from pathlib import Path",
"logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None if args.runtype.type.startswith(\"eval_reference\"): reference_losses_path = hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\")",
"working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if __name__ == \"__main__\": run() #",
"Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses",
":04d}\") trials_per_task, trials_until_loss = _get_trial_parameters( args, reference_losses, step ) logger.info(f\"Using configspace\\n{configspace}\".rstrip()) batch_result =",
"previous_results=previous_results, ) result_batch.insert(task_result, task) if step > 1: _write_batch_result(args, result_batch) return result_batch def",
"= args.runtype.dim_factor logger.info(f\"Using {trials_per_task} trials per task\") if step > 1 and args.runtype.type.startswith(\"eval_reference\"):",
"step > 1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark, args): reference_losses =",
"args, ) result_trajectory.insert(batch_result) class _HPOWorker(Worker): def __init__(self, benchmark, **kwargs): super().__init__(**kwargs) # Only read",
"sure the nameserver is already running host = hpns.nic_name_to_host(args.nic_name) w = _HPOWorker( benchmark,",
"for task in task_batch: logger.info(f\"Running on task {task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task,",
"delay to make sure the nameserver is already running host = hpns.nic_name_to_host(args.nic_name) w",
"optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and evaluate",
"import logging import logging.config import random import time from pathlib import Path import",
"import numpy as np import yaml from gitinfo import gitinfo from hp_transfer_optimizers.core import",
"args.worker_id == 0: _run_master(args, benchmark, working_directory) else: _run_worker(args, benchmark, working_directory) logger.info(f\"Run finished\") if",
"> 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info( f\"Also performing trials until loss",
"= result_trajectory if do_transfer else None result_batch = result_utils.BatchResult(step, configspace) for task in",
"def _train_and_eval(optimizer, benchmark, args): reference_losses = _read_reference_losses(args) result_trajectory = result_utils.TrajectoryResult() for step, (train_batch,",
"as hpns from hp_transfer_optimizers.core import result as result_utils from hp_transfer_optimizers.core.worker import Worker from",
"nameserver = hpns.NameServer( run_id=args.run_id, working_directory=str(working_directory), nic_name=args.nic_name, ) ns_host, ns_port = nameserver.start() # Start",
"nameserver_port=ns_port, logger=logging.getLogger(\"master\"), ) # Train and evaluate the optimizer try: _train_and_eval(optimizer, benchmark, args)",
"!= self._previous_development_stage or self._previous_task_identifier != task_identifier ) if task_changed: # Only read task",
"optimizer try: _train_and_eval(optimizer, benchmark, args) finally: optimizer.shutdown(shutdown_workers=True) nameserver.shutdown() def _set_seeds(seed): random.seed(seed) np.random.seed(seed) #",
"hydra.utils.to_absolute_path(args.reference_losses_path) with Path(reference_losses_path).open(\"r\") as stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses =",
"stream: reference_losses = yaml.safe_load(stream) reference_losses = reference_losses[args.benchmark.name] reference_losses = reference_losses[str(args.benchmark.benchmark.trajectory_id)] reference_losses = reference_losses[str(args.benchmark.benchmark.adjustment_id)]",
"OmegaConf from hp_transfer_aa_experiments.analyse.read_results import get_batch_result_row logger = logging.getLogger(\"hp_transfer_aa_experiments.run\") def _read_reference_losses(args): reference_losses = None",
"= _HPOWorker( benchmark, run_id=args.run_id, host=host, logger=logging.getLogger(\"worker\"), ) w.load_nameserver_credentials(working_directory=str(working_directory)) w.run(background=False) def _run_master(args, benchmark, working_directory):",
"hydra.utils.to_absolute_path(\"results\"), args.experiment_group, f\"results/{args.experiment_name.replace('/', ',')}.csv\", ) result_path.parent.mkdir(exist_ok=True, parents=True) with result_path.open(\"a\") as result_stream: result_stream.write(\"\\t\".join([str(value) for",
"import logging.config import random import time from pathlib import Path import hp_transfer_benchmarks #",
"get_batch_result_row( args.benchmark.name, args.runtype.dim_factor_pre_adjustment, args.approach.name, args.benchmark.benchmark.trajectory_id, args.benchmark.benchmark.adjustment_id, args.run_id, result_batch, ) result_path = Path( hydra.utils.to_absolute_path(\"results\"),",
"to make sure the nameserver is already running host = hpns.nic_name_to_host(args.nic_name) w =",
"task_batch: logger.info(f\"Running on task {task.identifier}\") task_result = optimizer.run( configspace=configspace, task=task, n_iterations=trials_per_task, trials_until_loss=trials_until_loss, previous_results=previous_results,",
"\"reference\" and step == 1: continue logger.info(f\"Step ------- {step :04d}\") trials_per_task, trials_until_loss =",
"trials per task\") if step > 1 and args.runtype.type.startswith(\"eval_reference\"): trials_until_loss = reference_losses[step][f\"{args.runtype.dim_factor}_loss\"] logger.info(",
"batch_result = _run_on_task_batch( optimizer, train_batch, configspace, step, result_trajectory, trials_per_task, trials_until_loss, args, ) result_trajectory.insert(batch_result)",
"# torch.backends.cudnn.deterministic = True # torch.manual_seed(seed) # tf.random.set_seed(seed) @hydra.main(config_path=\"configs\", config_name=\"run\") def run(args): _set_seeds(args.seed)",
"w.run(background=True) # Create an optimizer optimizer = hydra.utils.instantiate( args.approach.approach, host=ns_host, nameserver=ns_host, nameserver_port=ns_port, logger=logging.getLogger(\"master\"),",
"result_batch.insert(task_result, task) if step > 1: _write_batch_result(args, result_batch) return result_batch def _train_and_eval(optimizer, benchmark,"
] |
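# The _HPOWorker above caches the benchmark task and only re-reads it when the
# task identifier or development stage changes. A minimal, self-contained sketch
# of that caching pattern (all names below are illustrative, not part of the
# original code):

class KeyedCache:
    """Reload an expensive resource only when its lookup key changes."""

    def __init__(self, loader):
        self._loader = loader   # callable: key -> resource
        self._key = None        # key of the currently cached resource
        self._resource = None

    def get(self, key):
        if key != self._key:    # a key change invalidates the cache
            self._resource = self._loader(key)
            self._key = key
        return self._resource


# Usage: the loader runs once per distinct key; repeated lookups hit the cache.
cache = KeyedCache(lambda key: f"loaded {key}")
assert cache.get("task-a") == "loaded task-a"
assert cache.get("task-a") == "loaded task-a"  # cached, loader not called again
assert cache.get("task-b") == "loaded task-b"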
# --- 990-EZ processing module, reassembled from the overlapping fragments of
# --- this list.

from process_co_pc import *
import logging

# Code by <NAME> (<EMAIL>), 2016-2017


def ez_dup_criteria(dups):
    dups['val'] = dups['TOTREV'].abs() + dups['ASS_EOY'].abs() + dups['EXPS'].abs()
    return dups, ['FISYR', 'val', 'STYEAR', 'rnd']


class ProcessEZ(ProcessCOPC):
    """
    Creates columns found only in the EZ dataframe
    """

    def ez_calculate(self):
        """
        Base method for calling all of the methods to calculate the columns
        for the 990 EZ form.

        ARGUMENTS
        None

        RETURNS
        None
        """
        main = self.main
        ez = main.data_dict['EZ']
        main.logger.info('Calculating new columns for EZ.')
        ez['TOTREV'] = self.ez_totrev(ez)
        ez['GRREC'] = self.ez_grrec(ez)
        ez['PROGREV'] = self.ez_progrev(ez)
        ez['SPEVTG'] = self.ez_spevtg(ez)
        ez['NETGNLS'] = self.ez_netgnls(ez)
        ez['FILENAME'] = self.ez_filename(ez)
        ez['EPOSTCARD'] = self.copc_epostcard(ez)
        ez['STYEAR'] = self.copc_styear(ez)
        ez['SOIYR'] = self.copc_soiyr(ez)
        ez['SUBCD'] = self.copc_subcd(ez)

    def ez_grrec(self, ez):
        """
        Calculates the GRREC column. Note that the same column has a different
        calculation for EINs from the Full 990 and EINs from the 990 EZ.

        ARGUMENTS
        ez (DataFrame) : Core file dataframe

        RETURNS
        Series
        """
        assert(ez['TOTREV'].dtype.type in [np.int64, np.float64])
        assert(ez['SALEOTHG'].dtype.type in [np.int64, np.float64])
        assert(ez['DIREXP'].dtype.type in [np.int64, np.float64])
        assert(ez['GOODS'].dtype.type in [np.int64, np.float64])
        return ez['TOTREV'] + ez['SALEOTHG'] + ez['DIREXP'] + ez['GOODS']

    def ez_progrev(self, ez):
        """
        Calculates the PROGREV column.

        ARGUMENTS
        ez (DataFrame) : Core file dataframe

        RETURNS
        Series
        """
        assert(ez['DUESASSESMNTS'].dtype.type in [np.int64, np.float64])
        assert(ez['PRGMSERVREV'].dtype.type in [np.int64, np.float64])
        return ez['DUESASSESMNTS'] + ez['PRGMSERVREV']

    def ez_spevtg(self, ez):
        """
        Calculates the SPEVTG column.

        ARGUMENTS
        ez (DataFrame) : Core file dataframe

        RETURNS
        Series
        """
        assert(ez['GRSINCFNDRSNG'].dtype.type in [np.int64, np.float64])
        assert(ez['GRSINCGAMING'].dtype.type in [np.int64, np.float64])
        return ez['GRSINCFNDRSNG'] + ez['GRSINCGAMING']

    def ez_totrev(self, ez):
        """
        Calculates the TOTREV column. Note that TOTREV2 is taken from 990EZ
        part I, 9, while TOTREV is calculated from the expense and income
        subtotals. This is the only column like this, and usually any
        discrepancies between stated and calculated values are tested in the
        validation steps. However, it was always done this way before, so it
        continues.

        ARGUMENTS
        ez (DataFrame) : Core file dataframe

        RETURNS
        Series
        """
        return ez['EXPS'] + ez['NETINC']

    def ez_netgnls(self, ez):
        """
        Returns the SALEOTHN column exactly. Redundant holdover from the old
        SQL process.

        ARGUMENTS
        ez (DataFrame) : Core file dataframe

        RETURNS
        Series
        """
        return ez['SALEOTHN']

    def ez_filename(self, ez):
        """
        Assembles the FILENAME column from the EIN and TAXPER columns, which
        is used to build the URL to the PDF of the 990 filing on the
        Foundation Center's website. The full construction is:
        http://990s.foundationcenter.org/990_pdf_archive/<FIRST THREE DIGITS OF EIN>/<FULL EIN>/<FILENAME>.pdf
        for 990 Full or EZ filings, or
        http://990s.foundationcenter.org/990pf_pdf_archive/<FIRST THREE DIGITS OF EIN>/<FULL EIN>/<FILENAME>.pdf
        for 990 PF filings.

        ARGUMENTS
        ez (DataFrame) : Core file dataframe

        RETURNS
        Series
        """
        return ez.index + '_' + ez['TAXPER'] + '_990EZ'

    def ez_manual(self):
        """
        Applies any manual, one-time fixes to the EZ data. This is usually
        defined as a change to a single EIN from a single year, in a
        non-generalizable way, e.g. a mistyped EIN in the raw IRS data.

        ARGUMENTS
        None

        RETURNS
        None
        """
        try:
            entry = self.main.data_dict['EZ'].loc['580623603']
            if (entry['SOURCE'] == '16eofinextractez.dat'
                    and entry['NAME'] == 'UNITED WAY OF THE COASTAL EMPIRE INC'):
                self.main.data_dict['EZ'].drop('580623603', inplace=True)
        except KeyError:
            pass
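# The ez_filename docstring above fully specifies how FILENAME maps to a PDF
# URL on the Foundation Center's website. A minimal sketch of that construction
# for a single EZ filing (the helper name and the sample EIN/TAXPER values are
# illustrative only; the 990pf_pdf_archive variant works the same way):

def ez_filing_pdf_url(ein, taxper):
    """Build the 990-EZ PDF URL from an EIN and tax period, per the docstring above."""
    filename = ein + '_' + taxper + '_990EZ'            # mirrors ez_filename
    return ('http://990s.foundationcenter.org/990_pdf_archive/'
            + ein[:3] + '/' + ein + '/' + filename + '.pdf')

# e.g. ez_filing_pdf_url('580623603', '201012') ->
# 'http://990s.foundationcenter.org/990_pdf_archive/580/580623603/580623603_201012_990EZ.pdf'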
# --- elfinder error codes and exception classes, reassembled from the
# --- overlapping fragments of this list; spans the fragments do not cover
# --- are marked "...".

from django.utils.translation import ugettext as _  # assumed: `_` is used below but its import is not recovered


class ElfinderErrorMessages(object):
    # ... (earlier constants, including the one named for 'errNoVolumes', are not fully recovered)
    ERROR_INV_PARAMS = 'errCmdParams'
    ERROR_OPEN = 'errOpen'
    ERROR_DIR_NOT_FOUND = 'errFolderNotFound'
    ERROR_FILE_NOT_FOUND = 'errFileNotFound'  #'File not found.'
    ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound'  #'Target folder "$1" not found.'
    ERROR_NOT_DIR = '...'  # value not recovered
    ERROR_PERM_DENIED = 'errPerm'
    ERROR_LOCKED = 'errLocked'  #'"$1" is locked and can not be renamed, moved or removed.'
    ERROR_EXISTS = 'errExists'  #'File named "$1" already exists.'
    ERROR_INVALID_NAME = 'errInvName'  #'Invalid file name.'
    ERROR_MKDIR = 'errMkdir'
    ERROR_MKFILE = 'errMkfile'
    ERROR_RENAME = 'errRename'
    ERROR_COPY = 'errCopy'
    ERROR_MOVE = 'errMove'
    ERROR_COPY_FROM = 'errCopyFrom'
    ERROR_COPY_TO = 'errCopyTo'
    ERROR_COPY_ITSELF = 'errCopyInItself'
    ERROR_REPLACE = 'errReplace'  #'Unable to replace "$1".'
    ERROR_RM = 'errRm'  #'Unable to remove "$1".'
    ERROR_RM_SRC = 'errRmSrc'  #'Unable remove source file(s)'
    ERROR_UPLOAD = 'errUpload'  #'Upload error.'
    ERROR_UPLOAD_FILE = 'errUploadFile'  #'Unable to upload "$1".'
    ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles'  #'No files found for upload.'
    ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize'  #'Data exceeds ...'
    ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize'  #'File exceeds maximum allowed size.'
    ERROR_UPLOAD_FILE_MIME = 'errUploadMime'  #'File type not allowed.'
    ERROR_UPLOAD_TRANSFER = 'errUploadTransfer'  #'"$1" transfer error.'
    ERROR_ACCESS_DENIED = 'errAccess'
    ERROR_NOT_REPLACE = 'errNotReplace'  #Object "$1" already exists at ... and can not be replaced with object of another type.
    ERROR_SAVE = 'errSave'
    ERROR_EXTRACT = 'errExtract'
    # ... (the constant named for 'errArcType' is not fully recovered)
    ERROR_ARC_SYMLINKS = 'errArcSymlinks'
    ERROR_ARC_MAXSIZE = 'errArcMaxSize'
    ERROR_RESIZE = 'errResize'
    ERROR_UNSUPPORT_TYPE = 'errUsupportType'
    ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content'
    ERROR_NETMOUNT = 'errNetMount'
    ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver'
    ERROR_NETMOUNT_FAILED = 'errNetMountFailed'


class VolumeNotFoundError(Exception):
    def __init__(self):
        super(VolumeNotFoundError, self).__init__(_("Volume could not be found"))


class FileNotFoundError(Exception):
    def __init__(self):
        super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND)


class DirNotFoundError(Exception):
    def __init__(self):
        super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND)


class PermissionDeniedError(Exception):
    def __init__(self):
        super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED)


class NamedError(Exception):
    """
    Elfinder-specific exception. `msg` contains the error code,
    `name` holds the path for which the operation failed.
    """
    def __init__(self, msg, name):
        self.name = name
        super(NamedError, self).__init__(msg)


class NotAnImageError(Exception):
    def __init__(self):
        super(NotAnImageError, self).__init__(_('This is not a valid image ...'))
"#'Data exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed",
"= 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED =",
"super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception): def __init__(self): super(PermissionDeniedError,",
"self).__init__(msg) class NotAnImageError(Exception): def __init__(self): super(NotAnImageError, self).__init__(_('This is not a valid image file'))",
"= 'errTrgFolderNotFound' #'Target folder \"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile'",
"size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File",
"at this location and can not be replaced with object of another type.",
"def __init__(self): super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception): \"\"\" Elfinder-specific exception. `msg` contains the error",
"= 'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\"",
"\"$1\" already exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.' ERROR_MKDIR = 'errMkdir' ERROR_MKFILE",
"Standard error message codes, the text message of which is handled by the",
"the elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf'",
"\"\"\" def __init__(self, msg, name): self.name = name super(NamedError, self).__init__(msg) class NotAnImageError(Exception): def",
"ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE",
"'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\" is locked and can not",
"FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception):",
"__init__(self): super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception): \"\"\" Elfinder-specific exception. `msg` contains the error code",
"'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception): def",
"= 'errRmSrc' #'Unable remove source file(s)' ERROR_UPLOAD = 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE =",
"\"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)' ERROR_UPLOAD = 'errUpload' #'Upload error.'",
"name): self.name = name super(NamedError, self).__init__(msg) class NotAnImageError(Exception): def __init__(self): super(NotAnImageError, self).__init__(_('This is",
"#'Unable remove source file(s)' ERROR_UPLOAD = 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable",
"= 'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself' ERROR_REPLACE =",
"ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND",
"= 'errResize' ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER =",
"removed.' ERROR_EXISTS = 'errExists' #'File named \"$1\" already exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid",
"= 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL =",
"from django.utils.translation import ugettext as _ class ElfinderErrorMessages: \"\"\" Standard error message codes,",
"ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\" already exists at this location",
"ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer error.'",
"not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED =",
"#Object \"$1\" already exists at this location and can not be replaced with",
"ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder \"$1\" not",
"ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self):",
"\"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED",
"class FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class",
"'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder \"$1\" not found.' ERROR_NOT_DIR",
"ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\" is locked and",
"#'Target folder \"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED =",
"exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed size.'",
"= 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload \"$1\".' ERROR_UPLOAD_NO_FILES =",
"'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.'",
"'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)' ERROR_UPLOAD",
"= 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS =",
"to remove \"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)' ERROR_UPLOAD = 'errUpload'",
"'errRmSrc' #'Unable remove source file(s)' ERROR_UPLOAD = 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile'",
"ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS",
"ERROR_REPLACE = 'errReplace' #'Unable to replace \"$1\".' ERROR_RM = 'errRm' #'Unable to remove",
"Elfinder-specific exception. `msg` contains the error code `name` holds the path for which",
"can not be replaced with object of another type. ERROR_SAVE = 'errSave' ERROR_EXTRACT",
"= 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN =",
"import ugettext as _ class ElfinderErrorMessages: \"\"\" Standard error message codes, the text",
"__init__(self, msg, name): self.name = name super(NamedError, self).__init__(msg) class NotAnImageError(Exception): def __init__(self): super(NotAnImageError,",
"self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception): def __init__(self): super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED)",
"ERROR_SAVE = 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE",
"not be renamed, moved or removed.' ERROR_EXISTS = 'errExists' #'File named \"$1\" already",
"with object of another type. ERROR_SAVE = 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE =",
"'errLocked' #'\"$1\" is locked and can not be renamed, moved or removed.' ERROR_EXISTS",
"locked and can not be renamed, moved or removed.' ERROR_EXISTS = 'errExists' #'File",
"'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\" already exists at this location and can",
"'errUploadMime' #'File type not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED =",
"= 'errExists' #'File named \"$1\" already exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.'",
"= 'errUploadFileSize' #'File exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not",
"ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\" already exists at this location and can not",
"class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\")) class FileNotFoundError(Exception): def",
"ERROR_LOCKED = 'errLocked' #'\"$1\" is locked and can not be renamed, moved or",
"allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace'",
"renamed, moved or removed.' ERROR_EXISTS = 'errExists' #'File named \"$1\" already exists.' ERROR_INVALID_NAME",
"#'Invalid file name.' ERROR_MKDIR = 'errMkdir' ERROR_MKFILE = 'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY",
"exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.' ERROR_UPLOAD_TRANSFER =",
"upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize'",
"'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType'",
"'errArcMaxSize' ERROR_RESIZE = 'errResize' ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount'",
"\"\"\" Standard error message codes, the text message of which is handled by",
"the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME",
"replaced with object of another type. ERROR_SAVE = 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE",
"#'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload \"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No",
"ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE",
"= 'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE = 'errResize' ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT =",
"= 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not",
"= 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE =",
"holds the path for which operation failed \"\"\" def __init__(self, msg, name): self.name",
"#'\"$1\" is locked and can not be renamed, moved or removed.' ERROR_EXISTS =",
"= 'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not",
"'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo'",
"code `name` holds the path for which operation failed \"\"\" def __init__(self, msg,",
"ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\"))",
"= 'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\" is locked and can not be renamed,",
"file(s)' ERROR_UPLOAD = 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload \"$1\".'",
"'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams'",
"for which operation failed \"\"\" def __init__(self, msg, name): self.name = name super(NamedError,",
"files found for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed size.'",
"found for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE",
"name.' ERROR_MKDIR = 'errMkdir' ERROR_MKFILE = 'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY = 'errCopy'",
"'errNotReplace' #Object \"$1\" already exists at this location and can not be replaced",
"'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\")) class FileNotFoundError(Exception):",
"found\")) class FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND)",
"found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED = 'errLocked'",
"= 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE =",
"= 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\" is",
"ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File",
"source file(s)' ERROR_UPLOAD = 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload",
"__init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception): def __init__(self):",
"'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE = 'errResize' ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content'",
"'errCopyInItself' ERROR_REPLACE = 'errReplace' #'Unable to replace \"$1\".' ERROR_RM = 'errRm' #'Unable to",
"allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\"",
"\"\"\" Elfinder-specific exception. `msg` contains the error code `name` holds the path for",
"contains the error code `name` holds the path for which operation failed \"\"\"",
"'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\" already",
"ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself' ERROR_REPLACE = 'errReplace' #'Unable",
"django.utils.translation import ugettext as _ class ElfinderErrorMessages: \"\"\" Standard error message codes, the",
"super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\")) class FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class",
"ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND",
"= 'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM =",
"#'File exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.' ERROR_UPLOAD_TRANSFER",
"<gh_stars>10-100 from django.utils.translation import ugettext as _ class ElfinderErrorMessages: \"\"\" Standard error message",
"= 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception):",
"#'Unable to upload \"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for upload.' ERROR_UPLOAD_TOTAL_SIZE",
"could not be found\")) class FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def",
"found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder \"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE",
"super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception): \"\"\" Elfinder-specific exception. `msg` contains the error code `name`",
"'errUploadFileSize' #'File exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.'",
"= 'errInvName' #'Invalid file name.' ERROR_MKDIR = 'errMkdir' ERROR_MKFILE = 'errMkfile' ERROR_RENAME =",
"codes, the text message of which is handled by the elFinder client \"\"\"",
"ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED",
"\"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON'",
"replace \"$1\".' ERROR_RM = 'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable",
"self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception): \"\"\" Elfinder-specific exception. `msg` contains the error code `name` holds",
"'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM = 'errCopyFrom'",
"= 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder",
"ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.' ERROR_MKDIR = 'errMkdir' ERROR_MKFILE = 'errMkfile' ERROR_RENAME",
"maximum allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer'",
"or removed.' ERROR_EXISTS = 'errExists' #'File named \"$1\" already exists.' ERROR_INVALID_NAME = 'errInvName'",
"= 'errNotReplace' #Object \"$1\" already exists at this location and can not be",
"as _ class ElfinderErrorMessages: \"\"\" Standard error message codes, the text message of",
"NamedError(Exception): \"\"\" Elfinder-specific exception. `msg` contains the error code `name` holds the path",
"which is handled by the elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD =",
"'errReplace' #'Unable to replace \"$1\".' ERROR_RM = 'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC",
"ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class",
"__init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\")) class FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND)",
"= name super(NamedError, self).__init__(msg) class NotAnImageError(Exception): def __init__(self): super(NotAnImageError, self).__init__(_('This is not a",
"_ class ElfinderErrorMessages: \"\"\" Standard error message codes, the text message of which",
"not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE =",
"ERROR_MKFILE = 'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM",
"ERROR_MOVE = 'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself' ERROR_REPLACE",
"exists at this location and can not be replaced with object of another",
"'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN = 'errOpen'",
"self.name = name super(NamedError, self).__init__(msg) class NotAnImageError(Exception): def __init__(self): super(NotAnImageError, self).__init__(_('This is not",
"'errMkdir' ERROR_MKFILE = 'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE = 'errMove'",
"exception. `msg` contains the error code `name` holds the path for which operation",
"= 'errUploadNoFiles' #'No files found for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the",
"ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN",
"= 'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\")) class",
"'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\" is locked",
"'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound'",
"error message codes, the text message of which is handled by the elFinder",
"type not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE",
"by the elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF =",
"already exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.' ERROR_MKDIR = 'errMkdir' ERROR_MKFILE =",
"ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND",
"def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\")) class FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError,",
"= 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\" is locked and can",
"the path for which operation failed \"\"\" def __init__(self, msg, name): self.name =",
"'errUploadFile' #'Unable to upload \"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for upload.'",
"message of which is handled by the elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown'",
"class ElfinderErrorMessages: \"\"\" Standard error message codes, the text message of which is",
"another type. ERROR_SAVE = 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE =",
"= 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\" already exists at this location and",
"\"$1\".' ERROR_RM = 'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable remove",
"remove \"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)' ERROR_UPLOAD = 'errUpload' #'Upload",
"= 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE =",
"ERROR_MKDIR = 'errMkdir' ERROR_MKFILE = 'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE",
"ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself' ERROR_REPLACE = 'errReplace' #'Unable to replace \"$1\".'",
"msg, name): self.name = name super(NamedError, self).__init__(msg) class NotAnImageError(Exception): def __init__(self): super(NotAnImageError, self).__init__(_('This",
"= 'errArcMaxSize' ERROR_RESIZE = 'errResize' ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT =",
"the error code `name` holds the path for which operation failed \"\"\" def",
"= 'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder \"$1\" not found.'",
"be found\")) class FileNotFoundError(Exception): def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError,",
"#'No files found for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed",
"ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL",
"= 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE = 'errResize' ERROR_UNSUPPORT_TYPE =",
"for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE =",
"ERROR_UPLOAD = 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload \"$1\".' ERROR_UPLOAD_NO_FILES",
"'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize'",
"= 'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO =",
"error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\" already exists at this",
"`name` holds the path for which operation failed \"\"\" def __init__(self, msg, name):",
"operation failed \"\"\" def __init__(self, msg, name): self.name = name super(NamedError, self).__init__(msg) class",
"this location and can not be replaced with object of another type. ERROR_SAVE",
"maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME =",
"def __init__(self): super(FileNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_FILE_NOT_FOUND) class DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception): def",
"`msg` contains the error code `name` holds the path for which operation failed",
"'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself' ERROR_REPLACE = 'errReplace' #'Unable to replace",
"def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception): def __init__(self): super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception): \"\"\"",
"ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object",
"ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE = 'errResize' ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT",
"super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception): def __init__(self): super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception): \"\"\" Elfinder-specific exception.",
"ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE = 'errResize' ERROR_UNSUPPORT_TYPE",
"and can not be renamed, moved or removed.' ERROR_EXISTS = 'errExists' #'File named",
"can not be renamed, moved or removed.' ERROR_EXISTS = 'errExists' #'File named \"$1\"",
"'errUploadTotalSize' #'Data exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum",
"= 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError,",
"location and can not be replaced with object of another type. ERROR_SAVE =",
"to upload \"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for upload.' ERROR_UPLOAD_TOTAL_SIZE =",
"not found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder \"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder'",
"upload \"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize'",
"'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\" is locked and can not be renamed, moved",
"message codes, the text message of which is handled by the elFinder client",
"'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself'",
"client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON =",
"\"$1\" already exists at this location and can not be replaced with object",
"= 'errUploadFile' #'Unable to upload \"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for",
"#'File type not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess'",
"= 'errUploadTotalSize' #'Data exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds",
"= 'errCopyInItself' ERROR_REPLACE = 'errReplace' #'Unable to replace \"$1\".' ERROR_RM = 'errRm' #'Unable",
"size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime' #'File type not allowed.' ERROR_UPLOAD_TRANSFER = 'errUploadTransfer' #'\"$1\" transfer",
"'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself' ERROR_REPLACE = 'errReplace'",
"class PermissionDeniedError(Exception): def __init__(self): super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception): \"\"\" Elfinder-specific exception. `msg` contains",
"'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder \"$1\"",
"= 'errReplace' #'Unable to replace \"$1\".' ERROR_RM = 'errRm' #'Unable to remove \"$1\".'",
"exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.' ERROR_MKDIR = 'errMkdir' ERROR_MKFILE = 'errMkfile'",
"'errExists' #'File named \"$1\" already exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.' ERROR_MKDIR",
"ERROR_RM = 'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable remove source",
"ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm' ERROR_LOCKED = 'errLocked' #'\"$1\"",
"the text message of which is handled by the elFinder client \"\"\" ERROR_UNKNOWN",
"'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed'",
"ElfinderErrorMessages: \"\"\" Standard error message codes, the text message of which is handled",
"named \"$1\" already exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid file name.' ERROR_MKDIR = 'errMkdir'",
"folder \"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED = 'errPerm'",
"ugettext as _ class ElfinderErrorMessages: \"\"\" Standard error message codes, the text message",
"handled by the elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF",
"'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks'",
"ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File",
"ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS",
"elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON",
"= 'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)'",
"'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound'",
"= 'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF =",
"'errUploadNoFiles' #'No files found for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data exceeds the maximum",
"'errTrgFolderNotFound' #'Target folder \"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE = 'errNotFile' ERROR_PERM_DENIED",
"of which is handled by the elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD",
"= 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND =",
"already exists at this location and can not be replaced with object of",
"'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd' ERROR_CONF = 'errConf' ERROR_CONF_NO_JSON = 'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes'",
"\"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles' #'No files found for upload.' ERROR_UPLOAD_TOTAL_SIZE = 'errUploadTotalSize' #'Data",
"text message of which is handled by the elFinder client \"\"\" ERROR_UNKNOWN =",
"allowed size.' ERROR_UPLOAD_FILE_SIZE = 'errUploadFileSize' #'File exceeds maximum allowed size.' ERROR_UPLOAD_FILE_MIME = 'errUploadMime'",
"object of another type. ERROR_SAVE = 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive'",
"ERROR_RM_SRC = 'errRmSrc' #'Unable remove source file(s)' ERROR_UPLOAD = 'errUpload' #'Upload error.' ERROR_UPLOAD_FILE",
"= 'errMkdir' ERROR_MKFILE = 'errMkfile' ERROR_RENAME = 'errRename' ERROR_COPY = 'errCopy' ERROR_MOVE =",
"#'\"$1\" transfer error.' ERROR_ACCESS_DENIED = 'errAccess' ERROR_NOT_REPLACE = 'errNotReplace' #Object \"$1\" already exists",
"of another type. ERROR_SAVE = 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE",
"'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be",
"error code `name` holds the path for which operation failed \"\"\" def __init__(self,",
"ERROR_TRGDIR_NOT_FOUND = 'errTrgFolderNotFound' #'Target folder \"$1\" not found.' ERROR_NOT_DIR = 'errNotFolder' ERROR_NOT_FILE =",
"DirNotFoundError(Exception): def __init__(self): super(DirNotFoundError, self).__init__(ElfinderErrorMessages.ERROR_DIR_NOT_FOUND) class PermissionDeniedError(Exception): def __init__(self): super(PermissionDeniedError, self).__init__(ElfinderErrorMessages.ERROR_PERM_DENIED) class NamedError(Exception):",
"'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS = 'errArcSymlinks' ERROR_ARC_MAXSIZE = 'errArcMaxSize' ERROR_RESIZE = 'errResize'",
"'errJSON' ERROR_CONF_NO_VOL = 'errNoVolumes' ERROR_INV_PARAMS = 'errCmdParams' ERROR_OPEN = 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound'",
"which operation failed \"\"\" def __init__(self, msg, name): self.name = name super(NamedError, self).__init__(msg)",
"failed \"\"\" def __init__(self, msg, name): self.name = name super(NamedError, self).__init__(msg) class NotAnImageError(Exception):",
"moved or removed.' ERROR_EXISTS = 'errExists' #'File named \"$1\" already exists.' ERROR_INVALID_NAME =",
"to replace \"$1\".' ERROR_RM = 'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC = 'errRmSrc'",
"'errResize' ERROR_UNSUPPORT_TYPE = 'errUsupportType' ERROR_NOT_UTF8_CONTENT = 'errNotUTF8Content' ERROR_NETMOUNT = 'errNetMount' ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver'",
"ERROR_COPY = 'errCopy' ERROR_MOVE = 'errMove' ERROR_COPY_FROM = 'errCopyFrom' ERROR_COPY_TO = 'errCopyTo' ERROR_COPY_ITSELF",
"is handled by the elFinder client \"\"\" ERROR_UNKNOWN = 'errUnknown' ERROR_UNKNOWN_CMD = 'errUnknownCmd'",
"ERROR_EXISTS = 'errExists' #'File named \"$1\" already exists.' ERROR_INVALID_NAME = 'errInvName' #'Invalid file",
"not be replaced with object of another type. ERROR_SAVE = 'errSave' ERROR_EXTRACT =",
"= 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive' ERROR_ARCHIVE_TYPE = 'errArcType' ERROR_ARC_SYMLINKS =",
"ERROR_NETMOUNT_NO_DRIVER = 'errNetMountNoDriver' ERROR_NETMOUNT_FAILED = 'errNetMountFailed' class VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could",
"= 'errCopyTo' ERROR_COPY_ITSELF = 'errCopyInItself' ERROR_REPLACE = 'errReplace' #'Unable to replace \"$1\".' ERROR_RM",
"def __init__(self, msg, name): self.name = name super(NamedError, self).__init__(msg) class NotAnImageError(Exception): def __init__(self):",
"#'Unable to replace \"$1\".' ERROR_RM = 'errRm' #'Unable to remove \"$1\".' ERROR_RM_SRC =",
"type. ERROR_SAVE = 'errSave' ERROR_EXTRACT = 'errExtract' ERROR_ARCHIVE = 'errArchive' ERROR_NOT_ARCHIVE = 'errNoArchive'",
"= 'errLocked' #'\"$1\" is locked and can not be renamed, moved or removed.'",
"= 'errOpen' ERROR_DIR_NOT_FOUND = 'errFolderNotFound' ERROR_FILE_NOT_FOUND = 'errFileNotFound' #'File not found.' ERROR_TRGDIR_NOT_FOUND =",
"VolumeNotFoundError(Exception): def __init__(self): super(VolumeNotFoundError, self).__init__(_(\"Volume could not be found\")) class FileNotFoundError(Exception): def __init__(self):",
"'errUpload' #'Upload error.' ERROR_UPLOAD_FILE = 'errUploadFile' #'Unable to upload \"$1\".' ERROR_UPLOAD_NO_FILES = 'errUploadNoFiles'"
] |
[
"def image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key",
"= 'new3333' #上传后保存的文件名 key = None # 处理上传结果 token = q.upload_token(bucket_name, key, 3600)",
"image_data) print(ret) print(info) if info.status_code == 200: return ret.get('key') else: return None if",
"None if __name__ == '__main__': with open('./滑稽.jpg', 'rb') as f: image_data = f.read()",
"Key 和 Secret Key access_key = '<KEY>' secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象",
"ret.get('key') else: return None if __name__ == '__main__': with open('./滑稽.jpg', 'rb') as f:",
"qiniu import Auth, put_data #需要填写你的 Access Key 和 Secret Key access_key = '<KEY>'",
"access_key = '<KEY>' secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key)",
"token = q.upload_token(bucket_name, key, 3600) ret, info = put_data(token, key, image_data) print(ret) print(info)",
"if __name__ == '__main__': with open('./滑稽.jpg', 'rb') as f: image_data = f.read() image_url(image_data)",
"print(info) if info.status_code == 200: return ret.get('key') else: return None if __name__ ==",
"'new3333' #上传后保存的文件名 key = None # 处理上传结果 token = q.upload_token(bucket_name, key, 3600) ret,",
"print(ret) print(info) if info.status_code == 200: return ret.get('key') else: return None if __name__",
"#上传后保存的文件名 key = None # 处理上传结果 token = q.upload_token(bucket_name, key, 3600) ret, info",
"put_data(token, key, image_data) print(ret) print(info) if info.status_code == 200: return ret.get('key') else: return",
"Secret Key access_key = '<KEY>' secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象 q =",
"q = Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key = None #",
"secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key = None # 处理上传结果 token =",
"Auth, put_data #需要填写你的 Access Key 和 Secret Key access_key = '<KEY>' secret_key =",
"= q.upload_token(bucket_name, key, 3600) ret, info = put_data(token, key, image_data) print(ret) print(info) if",
"bucket_name = 'new3333' #上传后保存的文件名 key = None # 处理上传结果 token = q.upload_token(bucket_name, key,",
"3600) ret, info = put_data(token, key, image_data) print(ret) print(info) if info.status_code == 200:",
"= Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key = None # 处理上传结果",
"else: return None if __name__ == '__main__': with open('./滑稽.jpg', 'rb') as f: image_data",
"info.status_code == 200: return ret.get('key') else: return None if __name__ == '__main__': with",
"#要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key = None # 处理上传结果 token = q.upload_token(bucket_name,",
"= '<KEY>' def image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333'",
"#需要填写你的 Access Key 和 Secret Key access_key = '<KEY>' secret_key = '<KEY>' def",
"secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间 bucket_name =",
"'<KEY>' secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间 bucket_name",
"#构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key = None",
"key, 3600) ret, info = put_data(token, key, image_data) print(ret) print(info) if info.status_code ==",
"None # 处理上传结果 token = q.upload_token(bucket_name, key, 3600) ret, info = put_data(token, key,",
"# 处理上传结果 token = q.upload_token(bucket_name, key, 3600) ret, info = put_data(token, key, image_data)",
"return None if __name__ == '__main__': with open('./滑稽.jpg', 'rb') as f: image_data =",
"key, image_data) print(ret) print(info) if info.status_code == 200: return ret.get('key') else: return None",
"= '<KEY>' secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间",
"info = put_data(token, key, image_data) print(ret) print(info) if info.status_code == 200: return ret.get('key')",
"= put_data(token, key, image_data) print(ret) print(info) if info.status_code == 200: return ret.get('key') else:",
"import Auth, put_data #需要填写你的 Access Key 和 Secret Key access_key = '<KEY>' secret_key",
"q.upload_token(bucket_name, key, 3600) ret, info = put_data(token, key, image_data) print(ret) print(info) if info.status_code",
"image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key =",
"key = None # 处理上传结果 token = q.upload_token(bucket_name, key, 3600) ret, info =",
"= None # 处理上传结果 token = q.upload_token(bucket_name, key, 3600) ret, info = put_data(token,",
"200: return ret.get('key') else: return None if __name__ == '__main__': with open('./滑稽.jpg', 'rb')",
"return ret.get('key') else: return None if __name__ == '__main__': with open('./滑稽.jpg', 'rb') as",
"if info.status_code == 200: return ret.get('key') else: return None if __name__ == '__main__':",
"== 200: return ret.get('key') else: return None if __name__ == '__main__': with open('./滑稽.jpg',",
"Access Key 和 Secret Key access_key = '<KEY>' secret_key = '<KEY>' def image_url(image_data):",
"put_data #需要填写你的 Access Key 和 Secret Key access_key = '<KEY>' secret_key = '<KEY>'",
"ret, info = put_data(token, key, image_data) print(ret) print(info) if info.status_code == 200: return",
"Key access_key = '<KEY>' secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象 q = Auth(access_key,",
"和 Secret Key access_key = '<KEY>' secret_key = '<KEY>' def image_url(image_data): #构建鉴权对象 q",
"'<KEY>' def image_url(image_data): #构建鉴权对象 q = Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名",
"处理上传结果 token = q.upload_token(bucket_name, key, 3600) ret, info = put_data(token, key, image_data) print(ret)",
"Auth(access_key, secret_key) #要上传的空间 bucket_name = 'new3333' #上传后保存的文件名 key = None # 处理上传结果 token",
"from qiniu import Auth, put_data #需要填写你的 Access Key 和 Secret Key access_key ="
] |
[
"import sys def load_gimp_palette(filename): \"\"\" For simplicity's sake, a palette is just an",
"the format using Gimp -> export as .txt. palette = load_gimp_palette('./palette.txt') img =",
"= scipy.misc.imread(img_file) for i in range(len(img)): for j in range(len(img[i])): rgba = img[i][j]",
"return best img_file = './test_input.png' if len(sys.argv) >= 2: img_file = sys.argv[1] #",
"simplicity's sake, a palette is just an array of RGB values: palette =",
"range(len(img)): for j in range(len(img[i])): rgba = img[i][j] #print('Bef: %s' % rgba) rgba",
"touch. best.append(rgba.tolist()[3]) return best img_file = './test_input.png' if len(sys.argv) >= 2: img_file =",
"assert len(prgb) == 3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff",
"rgba[3] is transparency, which we don't touch. best.append(rgba.tolist()[3]) return best img_file = './test_input.png'",
"len(sys.argv) >= 2: img_file = sys.argv[1] # Expected format: # #0080fc # ...",
"1e9 for prgb in palette: assert len(prgb) == 3 diff = abs(rgba[0]-prgb[0]) +",
"prgb in palette: assert len(prgb) == 3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) +",
"scipy.misc import sys def load_gimp_palette(filename): \"\"\" For simplicity's sake, a palette is just",
"= 1e9 for prgb in palette: assert len(prgb) == 3 diff = abs(rgba[0]-prgb[0])",
"best = prgb[:] # rgba[3] is transparency, which we don't touch. best.append(rgba.tolist()[3]) return",
"sake, a palette is just an array of RGB values: palette = [",
"rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba) img[i][j] = rgba scipy.misc.imsave(img_file.replace('input',",
"0) g = int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb)",
"# This is the format using Gimp -> export as .txt. palette =",
"if diff < dist: dist = diff best = prgb[:] # rgba[3] is",
"lines = open(filename, 'r').readlines() palette = [] for line in lines: if '#'",
"line) pass return palette def filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba,",
"filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best = None dist",
"open(filename, 'r').readlines() palette = [] for line in lines: if '#' in line:",
"import scipy import scipy.misc import sys def load_gimp_palette(filename): \"\"\" For simplicity's sake, a",
"= open(filename, 'r').readlines() palette = [] for line in lines: if '#' in",
"... ] \"\"\" lines = open(filename, 'r').readlines() palette = [] for line in",
"#print('Ignore %s' % line) pass return palette def filter_to_red(rgba): return [rgba[0], 0, 0,",
"= int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb) except: #print('Ignore %s' % line) pass",
"\"\"\" lines = open(filename, 'r').readlines() palette = [] for line in lines: if",
"= diff best = prgb[:] # rgba[3] is transparency, which we don't touch.",
"\"\"\" import scipy import scipy.misc import sys def load_gimp_palette(filename): \"\"\" For simplicity's sake,",
"dist: dist = diff best = prgb[:] # rgba[3] is transparency, which we",
"dist = diff best = prgb[:] # rgba[3] is transparency, which we don't",
"0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best = None dist = 1e9 for prgb",
"prgb[:] # rgba[3] is transparency, which we don't touch. best.append(rgba.tolist()[3]) return best img_file",
"img_file = './test_input.png' if len(sys.argv) >= 2: img_file = sys.argv[1] # Expected format:",
"sys def load_gimp_palette(filename): \"\"\" For simplicity's sake, a palette is just an array",
"pass return palette def filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette):",
"#print('Bef: %s' % rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba) img[i][j]",
"[r,g,b], [r,g,b], ... ] \"\"\" lines = open(filename, 'r').readlines() palette = [] for",
"= sys.argv[1] # Expected format: # #0080fc # ... # This is the",
"load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i in range(len(img)): for j in range(len(img[i])): rgba",
"%s' % rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba) img[i][j] =",
"rgb = [r,g,b] palette.append(rgb) except: #print('Ignore %s' % line) pass return palette def",
"def filter_to_closest_in_palette(rgba, palette): best = None dist = 1e9 for prgb in palette:",
"rgba[3]] def filter_to_closest_in_palette(rgba, palette): best = None dist = 1e9 for prgb in",
"in range(len(img)): for j in range(len(img[i])): rgba = img[i][j] #print('Bef: %s' % rgba)",
"] \"\"\" lines = open(filename, 'r').readlines() palette = [] for line in lines:",
"= abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff < dist: dist = diff",
"= None dist = 1e9 for prgb in palette: assert len(prgb) == 3",
"3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff < dist: dist",
"import scipy.misc import sys def load_gimp_palette(filename): \"\"\" For simplicity's sake, a palette is",
"Links - https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy \"\"\" import scipy import scipy.misc import sys def load_gimp_palette(filename): \"\"\"",
"img[i][j] #print('Bef: %s' % rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba)",
"%s' % line) pass return palette def filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]]",
"abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff < dist: dist = diff best",
"line = line.split('#')[1] try: r = int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0) b",
">= 2: img_file = sys.argv[1] # Expected format: # #0080fc # ... #",
"for i in range(len(img)): for j in range(len(img[i])): rgba = img[i][j] #print('Bef: %s'",
"values: palette = [ [r,g,b], [r,g,b], ... ] \"\"\" lines = open(filename, 'r').readlines()",
"+ abs(rgba[2]-prgb[2]) if diff < dist: dist = diff best = prgb[:] #",
"diff < dist: dist = diff best = prgb[:] # rgba[3] is transparency,",
"= load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i in range(len(img)): for j in range(len(img[i])):",
"# ... # This is the format using Gimp -> export as .txt.",
"For simplicity's sake, a palette is just an array of RGB values: palette",
"an array of RGB values: palette = [ [r,g,b], [r,g,b], ... ] \"\"\"",
"scipy import scipy.misc import sys def load_gimp_palette(filename): \"\"\" For simplicity's sake, a palette",
"is the format using Gimp -> export as .txt. palette = load_gimp_palette('./palette.txt') img",
"export as .txt. palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i in range(len(img)):",
"'#' in line: line = line.split('#')[1] try: r = int('0x'+line[0:2], 0) g =",
"except: #print('Ignore %s' % line) pass return palette def filter_to_red(rgba): return [rgba[0], 0,",
"return [rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best = None dist =",
"best = None dist = 1e9 for prgb in palette: assert len(prgb) ==",
"... # This is the format using Gimp -> export as .txt. palette",
"# Expected format: # #0080fc # ... # This is the format using",
"[] for line in lines: if '#' in line: line = line.split('#')[1] try:",
"\"\"\" For simplicity's sake, a palette is just an array of RGB values:",
"palette: assert len(prgb) == 3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if",
"using Gimp -> export as .txt. palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for",
"best img_file = './test_input.png' if len(sys.argv) >= 2: img_file = sys.argv[1] # Expected",
"Gimp -> export as .txt. palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i",
"array of RGB values: palette = [ [r,g,b], [r,g,b], ... ] \"\"\" lines",
"palette.append(rgb) except: #print('Ignore %s' % line) pass return palette def filter_to_red(rgba): return [rgba[0],",
"# rgba[3] is transparency, which we don't touch. best.append(rgba.tolist()[3]) return best img_file =",
"= img[i][j] #print('Bef: %s' % rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' %",
"a palette is just an array of RGB values: palette = [ [r,g,b],",
"line.split('#')[1] try: r = int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0) b = int('0x'+line[4:6],",
"which we don't touch. best.append(rgba.tolist()[3]) return best img_file = './test_input.png' if len(sys.argv) >=",
"#!/usr/bin/python3 \"\"\" # Links - https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy \"\"\" import scipy import scipy.misc import sys",
"sys.argv[1] # Expected format: # #0080fc # ... # This is the format",
"+ abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff < dist: dist = diff best =",
"if len(sys.argv) >= 2: img_file = sys.argv[1] # Expected format: # #0080fc #",
"= [] for line in lines: if '#' in line: line = line.split('#')[1]",
"[ [r,g,b], [r,g,b], ... ] \"\"\" lines = open(filename, 'r').readlines() palette = []",
"range(len(img[i])): rgba = img[i][j] #print('Bef: %s' % rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft:",
"palette = [] for line in lines: if '#' in line: line =",
"\"\"\" # Links - https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy \"\"\" import scipy import scipy.misc import sys def",
"load_gimp_palette(filename): \"\"\" For simplicity's sake, a palette is just an array of RGB",
"lines: if '#' in line: line = line.split('#')[1] try: r = int('0x'+line[0:2], 0)",
"= line.split('#')[1] try: r = int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0) b =",
"i in range(len(img)): for j in range(len(img[i])): rgba = img[i][j] #print('Bef: %s' %",
"g = int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb) except:",
"#0080fc # ... # This is the format using Gimp -> export as",
"r = int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0) rgb",
"int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb) except: #print('Ignore %s' % line) pass return",
"RGB values: palette = [ [r,g,b], [r,g,b], ... ] \"\"\" lines = open(filename,",
"in lines: if '#' in line: line = line.split('#')[1] try: r = int('0x'+line[0:2],",
"we don't touch. best.append(rgba.tolist()[3]) return best img_file = './test_input.png' if len(sys.argv) >= 2:",
"don't touch. best.append(rgba.tolist()[3]) return best img_file = './test_input.png' if len(sys.argv) >= 2: img_file",
"diff best = prgb[:] # rgba[3] is transparency, which we don't touch. best.append(rgba.tolist()[3])",
"< dist: dist = diff best = prgb[:] # rgba[3] is transparency, which",
"= prgb[:] # rgba[3] is transparency, which we don't touch. best.append(rgba.tolist()[3]) return best",
"format using Gimp -> export as .txt. palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file)",
"palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i in range(len(img)): for j in",
"[rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best = None dist = 1e9",
"j in range(len(img[i])): rgba = img[i][j] #print('Bef: %s' % rgba) rgba = filter_to_closest_in_palette(rgba,",
"rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba) img[i][j] = rgba scipy.misc.imsave(img_file.replace('input', 'output'),",
"for line in lines: if '#' in line: line = line.split('#')[1] try: r",
"[r,g,b], ... ] \"\"\" lines = open(filename, 'r').readlines() palette = [] for line",
"- https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy \"\"\" import scipy import scipy.misc import sys def load_gimp_palette(filename): \"\"\" For",
"for j in range(len(img[i])): rgba = img[i][j] #print('Bef: %s' % rgba) rgba =",
"palette): best = None dist = 1e9 for prgb in palette: assert len(prgb)",
"% rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba) img[i][j] = rgba",
"= int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb) except: #print('Ignore",
"= [r,g,b] palette.append(rgb) except: #print('Ignore %s' % line) pass return palette def filter_to_red(rgba):",
"# Links - https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy \"\"\" import scipy import scipy.misc import sys def load_gimp_palette(filename):",
"format: # #0080fc # ... # This is the format using Gimp ->",
"0) b = int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb) except: #print('Ignore %s' %",
"scipy.misc.imread(img_file) for i in range(len(img)): for j in range(len(img[i])): rgba = img[i][j] #print('Bef:",
"try: r = int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0)",
"palette) #print('Aft: %s' % rgba) img[i][j] = rgba scipy.misc.imsave(img_file.replace('input', 'output'), img) #scipy.misc.imsave('xxx_'+img_file, img)",
"'./test_input.png' if len(sys.argv) >= 2: img_file = sys.argv[1] # Expected format: # #0080fc",
"palette def filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best =",
"def load_gimp_palette(filename): \"\"\" For simplicity's sake, a palette is just an array of",
"-> export as .txt. palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i in",
"'r').readlines() palette = [] for line in lines: if '#' in line: line",
"rgba = img[i][j] #print('Bef: %s' % rgba) rgba = filter_to_closest_in_palette(rgba, palette) #print('Aft: %s'",
"in range(len(img[i])): rgba = img[i][j] #print('Bef: %s' % rgba) rgba = filter_to_closest_in_palette(rgba, palette)",
"just an array of RGB values: palette = [ [r,g,b], [r,g,b], ... ]",
"filter_to_closest_in_palette(rgba, palette): best = None dist = 1e9 for prgb in palette: assert",
"abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff < dist: dist = diff best = prgb[:]",
"palette = [ [r,g,b], [r,g,b], ... ] \"\"\" lines = open(filename, 'r').readlines() palette",
"None dist = 1e9 for prgb in palette: assert len(prgb) == 3 diff",
"dist = 1e9 for prgb in palette: assert len(prgb) == 3 diff =",
"diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff < dist: dist =",
"int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb) except: #print('Ignore %s'",
"abs(rgba[2]-prgb[2]) if diff < dist: dist = diff best = prgb[:] # rgba[3]",
"<filename>kiyoshi_ni_shokuhatsu/tools/png_to_palette.py #!/usr/bin/python3 \"\"\" # Links - https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy \"\"\" import scipy import scipy.misc import",
"in line: line = line.split('#')[1] try: r = int('0x'+line[0:2], 0) g = int('0x'+line[2:4],",
"is just an array of RGB values: palette = [ [r,g,b], [r,g,b], ...",
"= [ [r,g,b], [r,g,b], ... ] \"\"\" lines = open(filename, 'r').readlines() palette =",
"of RGB values: palette = [ [r,g,b], [r,g,b], ... ] \"\"\" lines =",
".txt. palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i in range(len(img)): for j",
"= filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba) img[i][j] = rgba scipy.misc.imsave(img_file.replace('input', 'output'), img)",
"for prgb in palette: assert len(prgb) == 3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1])",
"% line) pass return palette def filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]] def",
"= int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0) rgb =",
"len(prgb) == 3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff <",
"is transparency, which we don't touch. best.append(rgba.tolist()[3]) return best img_file = './test_input.png' if",
"best.append(rgba.tolist()[3]) return best img_file = './test_input.png' if len(sys.argv) >= 2: img_file = sys.argv[1]",
"img_file = sys.argv[1] # Expected format: # #0080fc # ... # This is",
"This is the format using Gimp -> export as .txt. palette = load_gimp_palette('./palette.txt')",
"img = scipy.misc.imread(img_file) for i in range(len(img)): for j in range(len(img[i])): rgba =",
"def filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best = None",
"filter_to_closest_in_palette(rgba, palette) #print('Aft: %s' % rgba) img[i][j] = rgba scipy.misc.imsave(img_file.replace('input', 'output'), img) #scipy.misc.imsave('xxx_'+img_file,",
"in palette: assert len(prgb) == 3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2])",
"b = int('0x'+line[4:6], 0) rgb = [r,g,b] palette.append(rgb) except: #print('Ignore %s' % line)",
"0) rgb = [r,g,b] palette.append(rgb) except: #print('Ignore %s' % line) pass return palette",
"2: img_file = sys.argv[1] # Expected format: # #0080fc # ... # This",
"as .txt. palette = load_gimp_palette('./palette.txt') img = scipy.misc.imread(img_file) for i in range(len(img)): for",
"line: line = line.split('#')[1] try: r = int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0)",
"return palette def filter_to_red(rgba): return [rgba[0], 0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best",
"0, 0, rgba[3]] def filter_to_closest_in_palette(rgba, palette): best = None dist = 1e9 for",
"= './test_input.png' if len(sys.argv) >= 2: img_file = sys.argv[1] # Expected format: #",
"[r,g,b] palette.append(rgb) except: #print('Ignore %s' % line) pass return palette def filter_to_red(rgba): return",
"int('0x'+line[0:2], 0) g = int('0x'+line[2:4], 0) b = int('0x'+line[4:6], 0) rgb = [r,g,b]",
"Expected format: # #0080fc # ... # This is the format using Gimp",
"# #0080fc # ... # This is the format using Gimp -> export",
"line in lines: if '#' in line: line = line.split('#')[1] try: r =",
"transparency, which we don't touch. best.append(rgba.tolist()[3]) return best img_file = './test_input.png' if len(sys.argv)",
"https://stackoverflow.com/questions/31386096/importing-png-files-into-numpy \"\"\" import scipy import scipy.misc import sys def load_gimp_palette(filename): \"\"\" For simplicity's",
"if '#' in line: line = line.split('#')[1] try: r = int('0x'+line[0:2], 0) g",
"== 3 diff = abs(rgba[0]-prgb[0]) + abs(rgba[1]-prgb[1]) + abs(rgba[2]-prgb[2]) if diff < dist:",
"palette is just an array of RGB values: palette = [ [r,g,b], [r,g,b],"
"Solution: def generateMatrix(self, n: int) -> List[List[int]]: if not n: return [] A,",
"[] A, lo = [[n*n]], n*n while lo > 1: lo, hi =",
"int) -> List[List[int]]: if not n: return [] A, lo = [[n*n]], n*n",
"> 1: lo, hi = lo - len(A), lo A = [[ i",
"for i in range(lo, hi)]] + [list(j) for j in zip(*A[::-1])] return A",
"A, lo = [[n*n]], n*n while lo > 1: lo, hi = lo",
"lo - len(A), lo A = [[ i for i in range(lo, hi)]]",
"A = [[ i for i in range(lo, hi)]] + [list(j) for j",
"<reponame>lyphui/Just-Code class Solution: def generateMatrix(self, n: int) -> List[List[int]]: if not n: return",
"[[n*n]], n*n while lo > 1: lo, hi = lo - len(A), lo",
"n*n while lo > 1: lo, hi = lo - len(A), lo A",
"while lo > 1: lo, hi = lo - len(A), lo A =",
"n: return [] A, lo = [[n*n]], n*n while lo > 1: lo,",
"- len(A), lo A = [[ i for i in range(lo, hi)]] +",
"lo A = [[ i for i in range(lo, hi)]] + [list(j) for",
"not n: return [] A, lo = [[n*n]], n*n while lo > 1:",
"= lo - len(A), lo A = [[ i for i in range(lo,",
"return [] A, lo = [[n*n]], n*n while lo > 1: lo, hi",
"lo > 1: lo, hi = lo - len(A), lo A = [[",
"hi = lo - len(A), lo A = [[ i for i in",
"1: lo, hi = lo - len(A), lo A = [[ i for",
"lo = [[n*n]], n*n while lo > 1: lo, hi = lo -",
"i for i in range(lo, hi)]] + [list(j) for j in zip(*A[::-1])] return",
"len(A), lo A = [[ i for i in range(lo, hi)]] + [list(j)",
"lo, hi = lo - len(A), lo A = [[ i for i",
"class Solution: def generateMatrix(self, n: int) -> List[List[int]]: if not n: return []",
"def generateMatrix(self, n: int) -> List[List[int]]: if not n: return [] A, lo",
"= [[ i for i in range(lo, hi)]] + [list(j) for j in",
"[[ i for i in range(lo, hi)]] + [list(j) for j in zip(*A[::-1])]",
"= [[n*n]], n*n while lo > 1: lo, hi = lo - len(A),",
"List[List[int]]: if not n: return [] A, lo = [[n*n]], n*n while lo",
"-> List[List[int]]: if not n: return [] A, lo = [[n*n]], n*n while",
"if not n: return [] A, lo = [[n*n]], n*n while lo >",
"n: int) -> List[List[int]]: if not n: return [] A, lo = [[n*n]],",
"generateMatrix(self, n: int) -> List[List[int]]: if not n: return [] A, lo ="
"local para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local para o Wumpus",
"+ 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y == 0:",
"Mapa self.perceptions = { \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", } # Tamanho",
"y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi",
"da matrix self.dimension = dimension # Array de Coordenadas dos Elementos do Jogo",
"# Itens Possíves de Perceber no Mapa self.perceptions = { \"pit\": \"breeze\", \"gold\":",
")->tuple: x,y = (0,0) while( ((x,y) == (0,0)) or (self.matrix[x][y] != 'empty') ):",
"coordinate:tuple)->None: x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter')",
"else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi as matrizes de",
"matrix self.dimension = dimension # Array de Coordenadas dos Elementos do Jogo (Poço",
"else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira",
"y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None:",
"if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j == n-1: # MAIS A DIREITA",
"y = 0 if(obj['name'] == 'gold'): x = self.dimension-1 y = self.dimension-1 else:",
"line in range(self.dimension -1, -1, -1): for column in range(self.dimension): if coordinate ==",
"self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y",
"== 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if",
"self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return perceptions def isPerception(self,",
"for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal para o Ouro",
"estar nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def",
"coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool:",
"self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na ultima linha elif x == (self.dimension",
"= (0,0) while( ((x,y) == (0,0)) or (self.matrix[x][y] != 'empty') ): x, y",
">= self.dimension: return False if x < 0 or y < 0: return",
"_ in range(obj['amount']): x = y = 0 if(obj['name'] == 'gold'): x =",
"estar na primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se",
"ultima coluna if y == (self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica",
"randomCoordinate(self, )->tuple: x,y = (0,0) while( ((x,y) == (0,0)) or (self.matrix[x][y] != 'empty')",
"perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger",
"valid_environment = self.validEnvironment() def generate(self, obj: dict) -> None: for _ in range(obj['amount']):",
"obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar na primeira linha if",
"estar nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #",
"= [start] while not_visiteds: current = not_visiteds.pop() for neighbor in graph[current]: if neighbor",
"- 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if",
"return self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool: x, y = coordinate if not",
"de Perceber no Mapa self.perceptions = { \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\",",
"== 'pit' def isWumpus(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return",
"if not self.isValid(coordinate): return False return self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool: return",
"primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se estar",
"na ultima coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar",
"for column in range(dimension)] for line in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions =",
"nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object): graph = self.getGraph() visiteds = [start]",
"def printMatrix(self, coordinate: tuple): output = '' #print(coordinate) for line in range(self.dimension -1,",
"= coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'gold' def isExit(self,",
"else: # verifica se estar na primeira linha if x == 0: self.matrix_perceptions[x",
"return perceptions def isPerception(self, coordinate, perception)-> bool: x,y = coordinate if not self.isValid(coordinate):",
"!= 'pit': nodes.append(direita) elif j == n-1: # MAIS A DIREITA (apenas testar",
"coluna if y == (self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se",
"nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica",
"se estar na ultima linha elif x == (self.dimension - 1): self.matrix_perceptions[x -",
"if self.matrix[line][column] == 'wumpus': output += '|W' elif self.matrix[line][column] == 'gold': output +=",
"self.matrix[line][column] == 'wumpus': output += '|W' elif self.matrix[line][column] == 'gold': output += '|G'",
"meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output",
"if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return perceptions def",
"x, y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'pit'",
"== 'wumpus': output += '|W' elif self.matrix[line][column] == 'gold': output += '|G' elif",
"or y < 0: return False return True def getObjectCoord(self, name: str): return",
"local para o Wumpus self.screamTrigger = False self.n_pits = n_pits valid_environment = self.validEnvironment()",
"def depthSearch(self, start:object): graph = self.getGraph() visiteds = [start] not_visiteds = [start] while",
"'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return perceptions def isPerception(self, coordinate,",
"printMatrix(self, coordinate: tuple): output = '' #print(coordinate) for line in range(self.dimension -1, -1,",
"column in range(dimension)] for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal",
"# Constroi as matrizes de adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: #",
"na primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se",
"i == n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j",
"False self.n_pits = n_pits valid_environment = self.validEnvironment() def generate(self, obj: dict) -> None:",
"\"stench\", } # Tamanho da largua da matrix self.dimension = dimension # Array",
"self.screamTrigger = False return perceptions def isPerception(self, coordinate, perception)-> bool: x,y = coordinate",
"!= 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: #",
"\"gold\": \"glitter\", \"wumpus\": \"stench\", } # Tamanho da largua da matrix self.dimension =",
"do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na ultima linha elif",
"[start] not_visiteds = [start] while not_visiteds: current = not_visiteds.pop() for neighbor in graph[current]:",
"obj['name'] # Constroi as matrizes de adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else:",
"range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal para o Ouro self.generate({'name': 'pit','amount':n_pits}) #",
"in range(obj['amount']): x = y = 0 if(obj['name'] == 'gold'): x = self.dimension-1",
"coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool:",
"BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j",
"matrizes de adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar",
">= self.dimension or y >= self.dimension: return False if x < 0 or",
"from random import randrange class Environment(object): def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): #",
"self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal para o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera",
"um local para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local para o",
"0: return False return True def getObjectCoord(self, name: str): return self.coordinate[name] def getGraph(self,",
"or y >= self.dimension: return False if x < 0 or y <",
"e Y que não seja (0,0) def randomCoordinate(self, )->tuple: x,y = (0,0) while(",
"(self.matrix[x][y] != 'empty') ): x, y = randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self,",
"poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local para o Wumpus self.screamTrigger = False",
"def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves de Perceber no Mapa",
"self.getGraph() visiteds = [start] not_visiteds = [start] while not_visiteds: current = not_visiteds.pop() for",
"line in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions = [[ [] for column in",
"perception)-> bool: x,y = coordinate if not self.isValid(coordinate): return False return perception in",
"n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j == 0:",
"if not self.isValid(coordinate): return False return self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool: x,",
"y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se estar na ultima coluna",
"self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NAS LINHS DO MEIO (estar CIMA e",
"verifica se estar na ultima linha elif x == (self.dimension - 1): self.matrix_perceptions[x",
"na ultima linha elif x == (self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) #",
"# verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y",
"coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None: x, y",
"# NAS LINHS DO MEIO (estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit':",
"if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar na ultima coluna",
"if neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold': return",
"== 'pit': output += '|P' else : output += '| ' output +=",
"[[ [] for column in range(dimension)] for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) #",
"grafo def depthSearch(self, start:object): graph = self.getGraph() visiteds = [start] not_visiteds = [start]",
"= False return perceptions def isPerception(self, coordinate, perception)-> bool: x,y = coordinate if",
"-> bool: x , y = coordinate if x >= self.dimension or y",
"!= 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object): graph = self.getGraph() visiteds",
"not self.isValid(coordinate): return self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None: x, y = coordinate",
"se estar nas linhas do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']])",
"{} n = self.dimension for i in range(n): for j in range(n): cima,",
"DIREITA (apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NO",
"testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NO MEIO (testar",
"'pit': nodes.append(esquerda) else: # NAS LINHS DO MEIO (estar CIMA e BAIXO) if",
"self.isValid(coordinate): return False return self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool: x, y =",
"def isPerception(self, coordinate, perception)-> bool: x,y = coordinate if not self.isValid(coordinate): return False",
"+ 1].append(self.perceptions[obj['name']]) # verifica se estar na ultima coluna if y == (self.dimension",
"== 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar na ultima coluna elif y",
"for i in range(n): for j in range(n): cima, baixo, direita, esquerda =",
"a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NO MEIO (testar a",
"# verifica se estar na primeira coluna if y == 0: self.matrix_perceptions[x][y +",
"self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False",
"Array de Coordenadas dos Elementos do Jogo (Poço | Wumpus | Ouro) self.coordinate",
"self.matrix[x][y] = obj['name'] # Constroi as matrizes de adjascências if obj['name'] == 'gold':",
"x,y = (0,0) while( ((x,y) == (0,0)) or (self.matrix[x][y] != 'empty') ): x,",
"-1, -1, -1): for column in range(self.dimension): if coordinate == (line, column): output",
"x >= self.dimension or y >= self.dimension: return False if x < 0",
"nas linhas do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica",
"# ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j == 0: #",
"(estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit':",
"for neighbor in graph[current]: if neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if",
"+ 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if",
"0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y",
"'empty') ): x, y = randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self, coordinate) ->",
"dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves de Perceber no Mapa self.perceptions =",
"'pit': nodes.append(direita) elif j == n-1: # MAIS A DIREITA (apenas testar a",
"# Tamanho da largua da matrix self.dimension = dimension # Array de Coordenadas",
"(i,j+1), (i,j-1) nodes = [] if i == 0: # 1° LINHA if",
"do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar",
"y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas do meio",
"self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: # MAIS A ESQUERDA (apenas",
"+ 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output = '' #print(coordinate)",
"self.matrix[line][column] == 'gold': output += '|G' elif self.matrix[line][column] == 'pit': output += '|P'",
"a esquerda e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit':",
"self.dimension for i in range(n): for j in range(n): cima, baixo, direita, esquerda",
"self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i == n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]]",
"in range(self.dimension): if coordinate == (line, column): output += '|A' else: if self.matrix[line][column]",
"!= 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NAS LINHS DO",
"self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi as matrizes",
"MAIS A ESQUERDA (apenas testar a direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif",
"direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return",
"= dimension # Array de Coordenadas dos Elementos do Jogo (Poço | Wumpus",
"graph = self.getGraph() visiteds = [start] not_visiteds = [start] while not_visiteds: current =",
"if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if",
"= [[ [] for column in range(dimension)] for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds})",
"elif y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas do",
"'pit': nodes.append(esquerda) else: # NO MEIO (testar a esquerda e direita) if self.matrix[direita[0]][direita[1]]",
"coordinate:tuple)->None: self.screamTrigger = True x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y]",
"0 or y < 0: return False return True def getObjectCoord(self, name: str):",
"'pit','amount':n_pits}) # Gera um local para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um",
"+ 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas linhas do meio",
"== 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se estar na ultima coluna if",
"(Poço | Wumpus | Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment",
"if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NAS",
"self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False",
"bool: x,y = coordinate if not self.isValid(coordinate): return False return perception in self.matrix_perceptions[x][y]",
"self.coordinate[name] def getGraph(self, )->dict: grafo = {} n = self.dimension for i in",
"'pit': nodes.append(esquerda) elif i == n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit':",
"if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j ==",
"= False while(not valid_environment): self.matrix = [['empty' for column in range(dimension)] for line",
"= False self.n_pits = n_pits valid_environment = self.validEnvironment() def generate(self, obj: dict) ->",
"range(dimension)] for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal para o",
"nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: # MAIS A",
"return False return perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y = coordinate",
"de Coordenadas dos Elementos do Jogo (Poço | Wumpus | Ouro) self.coordinate =",
"ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j == 0: # MAIS",
"esquerda e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)",
"in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal para o Ouro self.generate({'name': 'pit','amount':n_pits})",
"neighbor in graph[current]: if neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y]",
"y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'gold' def",
"LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: # MAIS A",
"randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self, coordinate) -> bool: x , y =",
"# 1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: #",
"= self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi as matrizes de adjascências if obj['name']",
"self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate):",
"colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na ultima linha",
"para o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um local para os poços self.generate({'name':",
"direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes = [] if i ==",
"y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera",
"CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima)",
"A ESQUERDA (apenas testar a direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j",
"estar nas linhas do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) #",
"'pit' def isWumpus(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False",
"return False return self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool: x, y = coordinate",
"e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if",
"# Gera um Logal para o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um local",
"self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output = '' #print(coordinate) for line",
"Possíves de Perceber no Mapa self.perceptions = { \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\":",
"self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y ==",
"Wumpus self.screamTrigger = False self.n_pits = n_pits valid_environment = self.validEnvironment() def generate(self, obj:",
"Gera um local para o Wumpus self.screamTrigger = False self.n_pits = n_pits valid_environment",
"isGold(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y]",
"= self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] = obj['name']",
"1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output = '' #print(coordinate) for",
"bool: x , y = coordinate if x >= self.dimension or y >=",
"esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes = [] if i == 0:",
"self.screamTrigger = False self.n_pits = n_pits valid_environment = self.validEnvironment() def generate(self, obj: dict)",
"!= 'pit': nodes.append(esquerda) else: # NO MEIO (testar a esquerda e direita) if",
"if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo",
"que não seja (0,0) def randomCoordinate(self, )->tuple: x,y = (0,0) while( ((x,y) ==",
"os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local para o Wumpus self.screamTrigger =",
"-1): for column in range(self.dimension): if coordinate == (line, column): output += '|A'",
"n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves de Perceber no Mapa self.perceptions = {",
"self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output = ''",
"- 1].append(self.perceptions[obj['name']]) # verifica se estar nas linhas do meio else: self.matrix_perceptions[x +",
"# verifica se estar na ultima coluna if y == (self.dimension - 1):",
"def getGraph(self, )->dict: grafo = {} n = self.dimension for i in range(n):",
"while not_visiteds: current = not_visiteds.pop() for neighbor in graph[current]: if neighbor not in",
"output += '|P' else : output += '| ' output += '|\\n' print(output)",
"'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return",
"str): return self.coordinate[name] def getGraph(self, )->dict: grafo = {} n = self.dimension for",
"self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna",
"= self.validEnvironment() def generate(self, obj: dict) -> None: for _ in range(obj['amount']): x",
"coordinate, perception)-> bool: x,y = coordinate if not self.isValid(coordinate): return False return perception",
"coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada",
"self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia qualquer de",
"self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas linhas do meio else: self.matrix_perceptions[x",
"x = self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] =",
"elif x == (self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar",
"direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j == n-1: # MAIS A",
"direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i",
"output def getPerceptions(self, coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate,",
"self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas linhas do",
"def getObjectCoord(self, name: str): return self.coordinate[name] def getGraph(self, )->dict: grafo = {} n",
"coordinate if not self.isValid(coordinate): return False return perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool:",
"nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i == n-1: # ULTIMA LINHA",
"colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se",
"y >= self.dimension: return False if x < 0 or y < 0:",
"range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions = [[ [] for column in range(dimension)] for",
"else: # NO MEIO (testar a esquerda e direita) if self.matrix[direita[0]][direita[1]] != 'pit':",
"class Environment(object): def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves de Perceber",
"not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold': return True return",
"na primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar",
"y < 0: return False return True def getObjectCoord(self, name: str): return self.coordinate[name]",
"para o Wumpus self.screamTrigger = False self.n_pits = n_pits valid_environment = self.validEnvironment() def",
"Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um local para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) #",
"'|A' else: if self.matrix[line][column] == 'wumpus': output += '|W' elif self.matrix[line][column] == 'gold':",
"not_visiteds = [start] while not_visiteds: current = not_visiteds.pop() for neighbor in graph[current]: if",
"output += '|G' elif self.matrix[line][column] == 'pit': output += '|P' else : output",
"1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output = '' #print(coordinate) for line in range(self.dimension",
"self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate):",
"x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') #",
"self.matrix[0][0] = 'start' self.matrix_perceptions = [[ [] for column in range(dimension)] for line",
"(i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes = [] if i == 0: # 1°",
"self.screamTrigger = True x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] =",
"randrange class Environment(object): def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves de",
"= self.getGraph() visiteds = [start] not_visiteds = [start] while not_visiteds: current = not_visiteds.pop()",
"False return True def getObjectCoord(self, name: str): return self.coordinate[name] def getGraph(self, )->dict: grafo",
"name: str): return self.coordinate[name] def getGraph(self, )->dict: grafo = {} n = self.dimension",
"return perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y = coordinate if not",
"current = not_visiteds.pop() for neighbor in graph[current]: if neighbor not in visiteds: visiteds.append(neighbor)",
"for line in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions = [[ [] for column",
"na ultima coluna if y == (self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #",
"= True x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty'",
"def removeGold(self, coordinate:tuple)->None: x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] =",
"True def getObjectCoord(self, name: str): return self.coordinate[name] def getGraph(self, )->dict: grafo = {}",
"# verifica se estar na primeira linha if x == 0: self.matrix_perceptions[x +",
"'|P' else : output += '| ' output += '|\\n' print(output) return output",
"{ \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", } # Tamanho da largua da",
"adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar na primeira",
"MEIO (estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] !=",
"n-1: # MAIS A DIREITA (apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit':",
"visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold': return True return False def",
"!= 'pit': nodes.append(baixo) if j == 0: # MAIS A ESQUERDA (apenas testar",
"nodes.append(esquerda) else: # NO MEIO (testar a esquerda e direita) if self.matrix[direita[0]][direita[1]] !=",
"um Logal para o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um local para os",
"for j in range(n): cima, baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1)",
"j in range(n): cima, baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes",
"LINHS DO MEIO (estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if",
"random import randrange class Environment(object): def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens",
"se estar na primeira linha if x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) #",
"isValid(self, coordinate) -> bool: x , y = coordinate if x >= self.dimension",
"DO MEIO (estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]]",
"[] for column in range(dimension)] for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera",
"if not self.isValid(coordinate): return False return self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool: x,",
"== 'wumpus' def isGold(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return",
"!= 'pit': nodes.append(esquerda) else: # NAS LINHS DO MEIO (estar CIMA e BAIXO)",
"output += '|W' elif self.matrix[line][column] == 'gold': output += '|G' elif self.matrix[line][column] ==",
"#verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se",
"if i == 0: # 1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if",
"self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi as matrizes de adjascências if obj['name'] ==",
"coordinate == (line, column): output += '|A' else: if self.matrix[line][column] == 'wumpus': output",
"self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return perceptions def isPerception(self, coordinate, perception)-> bool: x,y",
"valid_environment): self.matrix = [['empty' for column in range(dimension)] for line in range(dimension)] self.matrix[0][0]",
"column in range(dimension)] for line in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions = [[",
"as matrizes de adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se",
"self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment = False while(not valid_environment): self.matrix",
"elif j == n-1: # MAIS A DIREITA (apenas testar a esquerda) if",
"se estar na ultima coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica",
"j == n-1: # MAIS A DIREITA (apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]]",
"self.matrix = [['empty' for column in range(dimension)] for line in range(dimension)] self.matrix[0][0] =",
"0 if(obj['name'] == 'gold'): x = self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y",
"def isWumpus(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False return",
"'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: # MAIS",
"isPerception(self, coordinate, perception)-> bool: x,y = coordinate if not self.isValid(coordinate): return False return",
"self.dimension: return False if x < 0 or y < 0: return False",
"se estar na primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #",
"- 1].append(self.perceptions[obj['name']]) # verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y +",
"\"glitter\", \"wumpus\": \"stench\", } # Tamanho da largua da matrix self.dimension = dimension",
"'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object):",
"colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate:",
"def isValid(self, coordinate) -> bool: x , y = coordinate if x >=",
"'wumpus','amount':n_wumpus}) # Gera um local para o Wumpus self.screamTrigger = False self.n_pits =",
"Coordenada Vazia qualquer de X e Y que não seja (0,0) def randomCoordinate(self,",
"Coordenadas dos Elementos do Jogo (Poço | Wumpus | Ouro) self.coordinate = {",
"1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: # MAIS",
"== (self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira",
"for column in range(self.dimension): if coordinate == (line, column): output += '|A' else:",
"se estar na primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica",
"esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NO MEIO (testar a esquerda",
"} # Tamanho da largua da matrix self.dimension = dimension # Array de",
"if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None: x, y =",
"na primeira linha if x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se",
"not self.isValid(coordinate): return False return perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y",
"if x < 0 or y < 0: return False return True def",
"' output += '|\\n' print(output) return output def getPerceptions(self, coordinate:tuple)->list: perceptions = []",
": output += '| ' output += '|\\n' print(output) return output def getPerceptions(self,",
"# verifica se estar na ultima linha elif x == (self.dimension - 1):",
"= coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'pit' def isWumpus(self,",
"i in range(n): for j in range(n): cima, baixo, direita, esquerda = (i+1,j),",
"= [['empty' for column in range(dimension)] for line in range(dimension)] self.matrix[0][0] = 'start'",
"meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na ultima linha elif x",
"Vazia qualquer de X e Y que não seja (0,0) def randomCoordinate(self, )->tuple:",
"nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object): graph",
"coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool:",
"coordinate:tuple)->bool: return coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x, y",
"verifica se estar na primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']])",
"y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'pit' def",
"estar nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na",
"[['empty' for column in range(dimension)] for line in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions",
"= coordinate if not self.isValid(coordinate): return False return perception in self.matrix_perceptions[x][y] def isPit(self,",
"LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j == 0: # MAIS A",
"meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na",
"y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar na ultima coluna elif",
"output += '|\\n' print(output) return output def getPerceptions(self, coordinate:tuple)->list: perceptions = [] if",
"0: # MAIS A ESQUERDA (apenas testar a direita) if self.matrix[direita[0]][direita[1]] != 'pit':",
"e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif",
"in graph[current]: if neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] ==",
"in range(self.dimension -1, -1, -1): for column in range(self.dimension): if coordinate == (line,",
"1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica",
"line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal para o Ouro self.generate({'name':",
"estar na ultima linha elif x == (self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']])",
"else: if self.matrix[line][column] == 'wumpus': output += '|W' elif self.matrix[line][column] == 'gold': output",
"range(self.dimension -1, -1, -1): for column in range(self.dimension): if coordinate == (line, column):",
"self.matrix[x][y] == 'gold': return True return False def validEnvironment(self, )-> bool: return self.depthSearch((0,0))",
"Perceber no Mapa self.perceptions = { \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", }",
"1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas colunas do meio else:",
"} valid_environment = False while(not valid_environment): self.matrix = [['empty' for column in range(dimension)]",
"def randomCoordinate(self, )->tuple: x,y = (0,0) while( ((x,y) == (0,0)) or (self.matrix[x][y] !=",
"elif self.matrix[line][column] == 'gold': output += '|G' elif self.matrix[line][column] == 'pit': output +=",
"perceptions def isPerception(self, coordinate, perception)-> bool: x,y = coordinate if not self.isValid(coordinate): return",
"- 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y == 0:",
"direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: #",
"if coordinate == (line, column): output += '|A' else: if self.matrix[line][column] == 'wumpus':",
"else: # NAS LINHS DO MEIO (estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] !=",
"[] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter')",
"for column in range(dimension)] for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um",
"NAS LINHS DO MEIO (estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo)",
"nodes.append(esquerda) elif i == n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo)",
"'pit': nodes.append(baixo) if j == 0: # MAIS A ESQUERDA (apenas testar a",
"e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes})",
"__init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves de Perceber no Mapa self.perceptions",
"not self.isValid(coordinate): return False return self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool: x, y",
"'start' self.matrix_perceptions = [[ [] for column in range(dimension)] for line in range(dimension)]",
"def getPerceptions(self, coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'):",
"x < 0 or y < 0: return False return True def getObjectCoord(self,",
"if(obj['name'] == 'gold'): x = self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y =",
"n = self.dimension for i in range(n): for j in range(n): cima, baixo,",
"= [start] not_visiteds = [start] while not_visiteds: current = not_visiteds.pop() for neighbor in",
"== (line, column): output += '|A' else: if self.matrix[line][column] == 'wumpus': output +=",
"1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas linhas do meio else:",
"self.isValid(coordinate): return False return self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool: return coordinate ==",
"range(obj['amount']): x = y = 0 if(obj['name'] == 'gold'): x = self.dimension-1 y",
"linhas do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se",
"x, y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'gold'",
"# Gera um local para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local",
"for _ in range(obj['amount']): x = y = 0 if(obj['name'] == 'gold'): x",
"True x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' def",
"'pit': nodes.append(cima) if j == 0: # MAIS A ESQUERDA (apenas testar a",
"\"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", } # Tamanho da largua da matrix",
"): x, y = randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self, coordinate) -> bool:",
"self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NO MEIO (testar a esquerda e direita)",
"return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia qualquer de X",
"seja (0,0) def randomCoordinate(self, )->tuple: x,y = (0,0) while( ((x,y) == (0,0)) or",
"'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object): graph = self.getGraph() visiteds =",
"= 'empty' def removeGold(self, coordinate:tuple)->None: x, y = coordinate if not self.isValid(coordinate): return",
"Gera um local para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local para",
"X e Y que não seja (0,0) def randomCoordinate(self, )->tuple: x,y = (0,0)",
"start:object): graph = self.getGraph() visiteds = [start] not_visiteds = [start] while not_visiteds: current",
"False return self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool: x, y = coordinate if",
"self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None: x, y = coordinate if not self.isValid(coordinate):",
"randrange(self.dimension) return (x,y) def isValid(self, coordinate) -> bool: x , y = coordinate",
"x , y = coordinate if x >= self.dimension or y >= self.dimension:",
"'| ' output += '|\\n' print(output) return output def getPerceptions(self, coordinate:tuple)->list: perceptions =",
"= [] if i == 0: # 1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit':",
"return self.coordinate[name] def getGraph(self, )->dict: grafo = {} n = self.dimension for i",
"obj: dict) -> None: for _ in range(obj['amount']): x = y = 0",
"(0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x, y = coordinate if not",
"self.isValid(coordinate): return self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None: x, y = coordinate if",
"valid_environment = False while(not valid_environment): self.matrix = [['empty' for column in range(dimension)] for",
"um local para o Wumpus self.screamTrigger = False self.n_pits = n_pits valid_environment =",
"self.isValid(coordinate): return False return perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y =",
"= coordinate if x >= self.dimension or y >= self.dimension: return False if",
"#print(coordinate) for line in range(self.dimension -1, -1, -1): for column in range(self.dimension): if",
"False return perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y = coordinate if",
"column in range(self.dimension): if coordinate == (line, column): output += '|A' else: if",
"perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return perceptions def isPerception(self, coordinate, perception)->",
"1].append(self.perceptions[obj['name']]) # verifica se estar na ultima coluna if y == (self.dimension -",
"print(output) return output def getPerceptions(self, coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze')",
"estar na primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica",
"x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna",
"== 0: # MAIS A ESQUERDA (apenas testar a direita) if self.matrix[direita[0]][direita[1]] !=",
")->dict: grafo = {} n = self.dimension for i in range(n): for j",
"n_golds:int=1, n_wumpus:int=1): # Itens Possíves de Perceber no Mapa self.perceptions = { \"pit\":",
"perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate):",
"= {} n = self.dimension for i in range(n): for j in range(n):",
"False while(not valid_environment): self.matrix = [['empty' for column in range(dimension)] for line in",
"para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local para o Wumpus self.screamTrigger",
"[start] while not_visiteds: current = not_visiteds.pop() for neighbor in graph[current]: if neighbor not",
"o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um local para os poços self.generate({'name': 'wumpus','amount':n_wumpus})",
"output += '| ' output += '|\\n' print(output) return output def getPerceptions(self, coordinate:tuple)->list:",
"n_pits valid_environment = self.validEnvironment() def generate(self, obj: dict) -> None: for _ in",
"isWumpus(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y]",
"self.generate({'name': 'pit','amount':n_pits}) # Gera um local para os poços self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera",
"nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self,",
"| Wumpus | Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment =",
"def isExit(self, coordinate:tuple)->bool: return coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True",
"'gold': output += '|G' elif self.matrix[line][column] == 'pit': output += '|P' else :",
"== 'gold' def isExit(self, coordinate:tuple)->bool: return coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger",
"'|W' elif self.matrix[line][column] == 'gold': output += '|G' elif self.matrix[line][column] == 'pit': output",
"(i,j-1) nodes = [] if i == 0: # 1° LINHA if self.matrix[cima[0]][cima[1]]",
"getPerceptions(self, coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench')",
"n_wumpus:int=1): # Itens Possíves de Perceber no Mapa self.perceptions = { \"pit\": \"breeze\",",
"coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas",
"if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se estar na ultima",
"ESQUERDA (apenas testar a direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j ==",
"'|G' elif self.matrix[line][column] == 'pit': output += '|P' else : output += '|",
"0: # 1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0:",
"self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y ==",
"(0,0) def randomCoordinate(self, )->tuple: x,y = (0,0) while( ((x,y) == (0,0)) or (self.matrix[x][y]",
"coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if",
"Wumpus | Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment = False",
"< 0 or y < 0: return False return True def getObjectCoord(self, name:",
"= coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma",
"uma Coordenada Vazia qualquer de X e Y que não seja (0,0) def",
"column): output += '|A' else: if self.matrix[line][column] == 'wumpus': output += '|W' elif",
"= (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes = [] if i == 0: #",
"+= '| ' output += '|\\n' print(output) return output def getPerceptions(self, coordinate:tuple)->list: perceptions",
"de adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar na",
"self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j == n-1: # MAIS A DIREITA (apenas",
"# MAIS A DIREITA (apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda)",
"== n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j ==",
"!= 'empty') ): x, y = randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self, coordinate)",
"Itens Possíves de Perceber no Mapa self.perceptions = { \"pit\": \"breeze\", \"gold\": \"glitter\",",
"1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y == 0: self.matrix_perceptions[x][y",
"if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger =",
"coordinate: tuple): output = '' #print(coordinate) for line in range(self.dimension -1, -1, -1):",
"se estar nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])",
"coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x, y = coordinate",
"= n_pits valid_environment = self.validEnvironment() def generate(self, obj: dict) -> None: for _",
"nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na ultima",
"y = randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self, coordinate) -> bool: x ,",
"= not_visiteds.pop() for neighbor in graph[current]: if neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor)",
"o Wumpus self.screamTrigger = False self.n_pits = n_pits valid_environment = self.validEnvironment() def generate(self,",
"return (x,y) def isValid(self, coordinate) -> bool: x , y = coordinate if",
"((x,y) == (0,0)) or (self.matrix[x][y] != 'empty') ): x, y = randrange(self.dimension), randrange(self.dimension)",
"getGraph(self, )->dict: grafo = {} n = self.dimension for i in range(n): for",
"if self.matrix[x][y] == 'gold': return True return False def validEnvironment(self, )-> bool: return",
"else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output =",
"in range(n): cima, baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes =",
"while( ((x,y) == (0,0)) or (self.matrix[x][y] != 'empty') ): x, y = randrange(self.dimension),",
"= randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self, coordinate) -> bool: x , y",
"se estar na ultima coluna if y == (self.dimension - 1): self.matrix_perceptions[x][y -",
"do Jogo (Poço | Wumpus | Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[]",
"qualquer de X e Y que não seja (0,0) def randomCoordinate(self, )->tuple: x,y",
"False return self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool: x, y = coordinate if",
"'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar na primeira linha if x ==",
"False return self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool: return coordinate == (0,0) def",
"removeGold(self, coordinate:tuple)->None: x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty'",
"if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NAS LINHS DO MEIO (estar CIMA",
"return grafo def depthSearch(self, start:object): graph = self.getGraph() visiteds = [start] not_visiteds =",
"!= 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i == n-1: #",
"estar na ultima coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se",
"nodes.append(direita) elif j == n-1: # MAIS A DIREITA (apenas testar a esquerda)",
"primeira coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar na",
"if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return perceptions def isPerception(self, coordinate, perception)-> bool:",
"nodes.append(esquerda) else: # NAS LINHS DO MEIO (estar CIMA e BAIXO) if self.matrix[baixo[0]][baixo[1]]",
"y = coordinate if x >= self.dimension or y >= self.dimension: return False",
"self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']])",
"for line in range(self.dimension -1, -1, -1): for column in range(self.dimension): if coordinate",
"não seja (0,0) def randomCoordinate(self, )->tuple: x,y = (0,0) while( ((x,y) == (0,0))",
"nodes.append(cima) if j == 0: # MAIS A ESQUERDA (apenas testar a direita)",
"'' #print(coordinate) for line in range(self.dimension -1, -1, -1): for column in range(self.dimension):",
"(i-1,j), (i,j+1), (i,j-1) nodes = [] if i == 0: # 1° LINHA",
"Constroi as matrizes de adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica",
"!= 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self,",
"== n-1: # MAIS A DIREITA (apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] !=",
"== 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar na primeira linha if x",
"'pit': output += '|P' else : output += '| ' output += '|\\n'",
"return False return self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool: x, y = coordinate",
"not self.isValid(coordinate): return False return self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool: return coordinate",
"not_visiteds: current = not_visiteds.pop() for neighbor in graph[current]: if neighbor not in visiteds:",
"if x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira",
"depthSearch(self, start:object): graph = self.getGraph() visiteds = [start] not_visiteds = [start] while not_visiteds:",
"x,y = self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi as matrizes de adjascências if",
"range(self.dimension): if coordinate == (line, column): output += '|A' else: if self.matrix[line][column] ==",
"output = '' #print(coordinate) for line in range(self.dimension -1, -1, -1): for column",
"def isGold(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False return",
"neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold': return True",
"primeira linha if x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se estar",
"dos Elementos do Jogo (Poço | Wumpus | Ouro) self.coordinate = { \"pit\":[],",
"# MAIS A ESQUERDA (apenas testar a direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)",
"self.perceptions = { \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", } # Tamanho da",
"no Mapa self.perceptions = { \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", } #",
"- 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple): output = '' #print(coordinate) for line in",
"elif self.matrix[line][column] == 'pit': output += '|P' else : output += '| '",
"'gold' def isExit(self, coordinate:tuple)->bool: return coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger =",
"isExit(self, coordinate:tuple)->bool: return coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x,",
"meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas",
"coordinate) -> bool: x , y = coordinate if x >= self.dimension or",
"isPit(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y]",
"perceptions.append('scream') self.screamTrigger = False return perceptions def isPerception(self, coordinate, perception)-> bool: x,y =",
"if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i ==",
"'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream')",
"Gera uma Coordenada Vazia qualquer de X e Y que não seja (0,0)",
"y == (self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas",
"do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) def printMatrix(self, coordinate: tuple):",
"# NO MEIO (testar a esquerda e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita)",
"(apenas testar a direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j == n-1:",
"= [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'):",
"= obj['name'] # Constroi as matrizes de adjascências if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']])",
"| Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment = False while(not",
"ultima linha elif x == (self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica",
"(self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna",
"coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se estar na",
"A DIREITA (apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: #",
"a direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j == n-1: # MAIS",
"self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger:",
"if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i == n-1: # ULTIMA LINHA if",
"1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y",
"+= '|P' else : output += '| ' output += '|\\n' print(output) return",
"ultima coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar nas",
"\"wumpus\":[], \"gold\":[] } valid_environment = False while(not valid_environment): self.matrix = [['empty' for column",
"else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas linhas",
"range(n): for j in range(n): cima, baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1),",
"x, y = randrange(self.dimension), randrange(self.dimension) return (x,y) def isValid(self, coordinate) -> bool: x",
"self.n_pits = n_pits valid_environment = self.validEnvironment() def generate(self, obj: dict) -> None: for",
"+= '|A' else: if self.matrix[line][column] == 'wumpus': output += '|W' elif self.matrix[line][column] ==",
"self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool: return coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None:",
"!= 'pit': nodes.append(esquerda) elif i == n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] !=",
"Elementos do Jogo (Poço | Wumpus | Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[],",
"= 0 if(obj['name'] == 'gold'): x = self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y))",
"else : output += '| ' output += '|\\n' print(output) return output def",
"while(not valid_environment): self.matrix = [['empty' for column in range(dimension)] for line in range(dimension)]",
"== (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x, y = coordinate if",
"if obj['name'] == 'gold': self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar na primeira linha",
"perceptions.append('stench') if self.isPerception(coordinate, 'glitter'): perceptions.append('glitter') if self.screamTrigger: perceptions.append('scream') self.screamTrigger = False return perceptions",
"largua da matrix self.dimension = dimension # Array de Coordenadas dos Elementos do",
"generate(self, obj: dict) -> None: for _ in range(obj['amount']): x = y =",
"Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment = False while(not valid_environment):",
"Logal para o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um local para os poços",
"Gera um Logal para o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um local para",
"dict) -> None: for _ in range(obj['amount']): x = y = 0 if(obj['name']",
"'gold'): x = self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y]",
"self.matrix_perceptions[x][y].append(self.perceptions[obj['name']]) else: # verifica se estar na primeira linha if x == 0:",
"+ 1].append(self.perceptions[obj['name']]) #verifica se estar na ultima coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y",
"-1, -1): for column in range(self.dimension): if coordinate == (line, column): output +=",
"+= '|G' elif self.matrix[line][column] == 'pit': output += '|P' else : output +=",
"self.validEnvironment() def generate(self, obj: dict) -> None: for _ in range(obj['amount']): x =",
"def isPit(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False return",
"x, y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'wumpus'",
"self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia qualquer de X e",
"self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i == n-1:",
"= y = 0 if(obj['name'] == 'gold'): x = self.dimension-1 y = self.dimension-1",
"\"gold\":[] } valid_environment = False while(not valid_environment): self.matrix = [['empty' for column in",
"False return perceptions def isPerception(self, coordinate, perception)-> bool: x,y = coordinate if not",
"= 'start' self.matrix_perceptions = [[ [] for column in range(dimension)] for line in",
"0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se estar na ultima coluna if y",
"testar a direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) elif j == n-1: #",
"perceptions = [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if self.isPerception(coordinate, 'stench'): perceptions.append('stench') if self.isPerception(coordinate,",
"grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object): graph = self.getGraph() visiteds = [start] not_visiteds",
"None: for _ in range(obj['amount']): x = y = 0 if(obj['name'] == 'gold'):",
"\"wumpus\": \"stench\", } # Tamanho da largua da matrix self.dimension = dimension #",
"in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return",
"verifica se estar na ultima coluna if y == (self.dimension - 1): self.matrix_perceptions[x][y",
"return self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool: return coordinate == (0,0) def removeWumpus(self,",
"== (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas do meio else:",
"self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] = obj['name'] #",
"'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) elif i == n-1: # ULTIMA",
"self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar na ultima coluna elif y == (self.dimension-1):",
"se estar nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar",
"<reponame>lucascampello/wumpus-cli<filename>game/environment.py from random import randrange class Environment(object): def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1):",
"= 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia qualquer de X e Y",
"x,y=neighbor if self.matrix[x][y] == 'gold': return True return False def validEnvironment(self, )-> bool:",
"+= '|W' elif self.matrix[line][column] == 'gold': output += '|G' elif self.matrix[line][column] == 'pit':",
"# Gera uma Coordenada Vazia qualquer de X e Y que não seja",
"if x >= self.dimension or y >= self.dimension: return False if x <",
"estar na primeira linha if x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica",
"i == 0: # 1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j",
"(0,0)) or (self.matrix[x][y] != 'empty') ): x, y = randrange(self.dimension), randrange(self.dimension) return (x,y)",
"if j == 0: # MAIS A ESQUERDA (apenas testar a direita) if",
"baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes = [] if i",
"== (0,0)) or (self.matrix[x][y] != 'empty') ): x, y = randrange(self.dimension), randrange(self.dimension) return",
"import randrange class Environment(object): def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves",
"in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold': return True return False",
"output += '|A' else: if self.matrix[line][column] == 'wumpus': output += '|W' elif self.matrix[line][column]",
"x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' def removeGold(self,",
"self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j == 0: # MAIS A ESQUERDA (apenas",
"return self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool: x, y = coordinate if not",
"return True def getObjectCoord(self, name: str): return self.coordinate[name] def getGraph(self, )->dict: grafo =",
"def generate(self, obj: dict) -> None: for _ in range(obj['amount']): x = y",
"linha elif x == (self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se",
"return self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None: x, y = coordinate if not",
"if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia",
"self.dimension or y >= self.dimension: return False if x < 0 or y",
"elif i == n-1: # ULTIMA LINHA if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if",
"'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NAS LINHS DO MEIO",
"Jogo (Poço | Wumpus | Ouro) self.coordinate = { \"pit\":[], \"wumpus\":[], \"gold\":[] }",
"in range(dimension)] for line in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions = [[ []",
"'gold','amount':n_golds}) # Gera um Logal para o Ouro self.generate({'name': 'pit','amount':n_pits}) # Gera um",
"linha if x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) # verifica se estar na",
"Tamanho da largua da matrix self.dimension = dimension # Array de Coordenadas dos",
"return output def getPerceptions(self, coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate, 'breeze'): perceptions.append('breeze') if",
"not self.isValid(coordinate): return False return self.matrix[x][y] == 'pit' def isWumpus(self, coordinate:tuple)->bool: x, y",
"self.matrix[line][column] == 'pit': output += '|P' else : output += '| ' output",
"(self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas colunas do",
"j == 0: # MAIS A ESQUERDA (apenas testar a direita) if self.matrix[direita[0]][direita[1]]",
"'|\\n' print(output) return output def getPerceptions(self, coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate, 'breeze'):",
"(x,y) def isValid(self, coordinate) -> bool: x , y = coordinate if x",
"if not self.isValid(coordinate): return False return perception in self.matrix_perceptions[x][y] def isPit(self, coordinate:tuple)->bool: x,",
"(self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']])",
"1].append(self.perceptions[obj['name']]) #verifica se estar na ultima coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y -",
"in range(n): for j in range(n): cima, baixo, direita, esquerda = (i+1,j), (i-1,j),",
"{ \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment = False while(not valid_environment): self.matrix = [['empty'",
"[] if i == 0: # 1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima)",
"(apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NO MEIO",
"def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x, y = coordinate if not self.isValid(coordinate):",
"cima, baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes = [] if",
"nodes = [] if i == 0: # 1° LINHA if self.matrix[cima[0]][cima[1]] !=",
"da largua da matrix self.dimension = dimension # Array de Coordenadas dos Elementos",
"= self.dimension for i in range(n): for j in range(n): cima, baixo, direita,",
"grafo = {} n = self.dimension for i in range(n): for j in",
"self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def",
"'wumpus': output += '|W' elif self.matrix[line][column] == 'gold': output += '|G' elif self.matrix[line][column]",
"removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x, y = coordinate if not self.isValid(coordinate): return",
"if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NO MEIO (testar a esquerda e",
"de X e Y que não seja (0,0) def randomCoordinate(self, )->tuple: x,y =",
"Y que não seja (0,0) def randomCoordinate(self, )->tuple: x,y = (0,0) while( ((x,y)",
"coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] ==",
"# verifica se estar nas linhas do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x",
"== 'gold': output += '|G' elif self.matrix[line][column] == 'pit': output += '|P' else",
"if y == (self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar",
"tuple): output = '' #print(coordinate) for line in range(self.dimension -1, -1, -1): for",
"e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else:",
"self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0:",
"return False return True def getObjectCoord(self, name: str): return self.coordinate[name] def getGraph(self, )->dict:",
"< 0: return False return True def getObjectCoord(self, name: str): return self.coordinate[name] def",
"or (self.matrix[x][y] != 'empty') ): x, y = randrange(self.dimension), randrange(self.dimension) return (x,y) def",
"1].append(self.perceptions[obj['name']]) # verifica se estar nas linhas do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']])",
"if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object): graph =",
"= '' #print(coordinate) for line in range(self.dimension -1, -1, -1): for column in",
"not_visiteds.pop() for neighbor in graph[current]: if neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor",
"not self.isValid(coordinate): return self.matrix[x][y] = 'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia qualquer",
"x,y = coordinate if not self.isValid(coordinate): return False return perception in self.matrix_perceptions[x][y] def",
"y = coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'wumpus' def",
"\"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment = False while(not valid_environment): self.matrix = [['empty' for",
"self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) # verifica se estar na ultima coluna if y ==",
"'empty' self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia qualquer de X e Y que",
"range(n): cima, baixo, direita, esquerda = (i+1,j), (i-1,j), (i,j+1), (i,j-1) nodes = []",
"self.matrix_perceptions[x][y].remove('glitter') # Gera uma Coordenada Vazia qualquer de X e Y que não",
", y = coordinate if x >= self.dimension or y >= self.dimension: return",
"self.isValid(coordinate): return False return self.matrix[x][y] == 'wumpus' def isGold(self, coordinate:tuple)->bool: x, y =",
"# Array de Coordenadas dos Elementos do Jogo (Poço | Wumpus | Ouro)",
"False if x < 0 or y < 0: return False return True",
"coordinate if x >= self.dimension or y >= self.dimension: return False if x",
"'wumpus' def isGold(self, coordinate:tuple)->bool: x, y = coordinate if not self.isValid(coordinate): return False",
"estar na ultima coluna if y == (self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])",
"== (self.dimension - 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas colunas",
"= coordinate if not self.isValid(coordinate): return self.matrix[x][y] = 'empty' def removeGold(self, coordinate:tuple)->None: x,",
"== 0: # 1° LINHA if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j ==",
"self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) grafo.update({(i,j):nodes}) return grafo def depthSearch(self, start:object): graph = self.getGraph()",
"in range(dimension)] for line in range(dimension)] self.generate({'name': 'gold','amount':n_golds}) # Gera um Logal para",
"MEIO (testar a esquerda e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]]",
"!= 'pit': nodes.append(cima) if j == 0: # MAIS A ESQUERDA (apenas testar",
"- 1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']]) # verifica se estar nas colunas do meio",
"(line, column): output += '|A' else: if self.matrix[line][column] == 'wumpus': output += '|W'",
"= coordinate if not self.isValid(coordinate): return False return self.matrix[x][y] == 'wumpus' def isGold(self,",
"x == (self.dimension - 1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na",
"graph[current]: if neighbor not in visiteds: visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold':",
"# Gera um local para o Wumpus self.screamTrigger = False self.n_pits = n_pits",
"self.generate({'name': 'wumpus','amount':n_wumpus}) # Gera um local para o Wumpus self.screamTrigger = False self.n_pits",
"nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NAS LINHS DO MEIO (estar",
"verifica se estar na primeira linha if x == 0: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']])",
"self.matrix_perceptions = [[ [] for column in range(dimension)] for line in range(dimension)] self.generate({'name':",
"+= '|\\n' print(output) return output def getPerceptions(self, coordinate:tuple)->list: perceptions = [] if self.isPerception(coordinate,",
"NO MEIO (testar a esquerda e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if",
"self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi as matrizes de adjascências",
"= { \"pit\": \"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", } # Tamanho da largua",
"return False return self.matrix[x][y] == 'gold' def isExit(self, coordinate:tuple)->bool: return coordinate == (0,0)",
"range(dimension)] for line in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions = [[ [] for",
"= { \"pit\":[], \"wumpus\":[], \"gold\":[] } valid_environment = False while(not valid_environment): self.matrix =",
"return False if x < 0 or y < 0: return False return",
"visiteds.append(neighbor) not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold': return True return False def validEnvironment(self,",
"coluna if y == 0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar na ultima",
"visiteds = [start] not_visiteds = [start] while not_visiteds: current = not_visiteds.pop() for neighbor",
"if self.matrix[baixo[0]][baixo[1]] != 'pit': nodes.append(baixo) if j == 0: # MAIS A ESQUERDA",
"return coordinate == (0,0) def removeWumpus(self, coordinate:tuple)->None: self.screamTrigger = True x, y =",
"'empty' def removeGold(self, coordinate:tuple)->None: x, y = coordinate if not self.isValid(coordinate): return self.matrix[x][y]",
"self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else: # NAS LINHS",
"(testar a esquerda e direita) if self.matrix[direita[0]][direita[1]] != 'pit': nodes.append(direita) if self.matrix[esquerda[0]][esquerda[1]] !=",
"MAIS A DIREITA (apenas testar a esquerda) if self.matrix[esquerda[0]][esquerda[1]] != 'pit': nodes.append(esquerda) else:",
"dimension # Array de Coordenadas dos Elementos do Jogo (Poço | Wumpus |",
"getObjectCoord(self, name: str): return self.coordinate[name] def getGraph(self, )->dict: grafo = {} n =",
"self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na ultima linha elif x == (self.dimension -",
"not_visiteds.append(neighbor) x,y=neighbor if self.matrix[x][y] == 'gold': return True return False def validEnvironment(self, )->",
"verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y -",
"\"breeze\", \"gold\": \"glitter\", \"wumpus\": \"stench\", } # Tamanho da largua da matrix self.dimension",
"do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar",
"0: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']]) #verifica se estar na ultima coluna elif y ==",
"(0,0) while( ((x,y) == (0,0)) or (self.matrix[x][y] != 'empty') ): x, y =",
"== 'gold'): x = self.dimension-1 y = self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate()",
"else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) # verifica se estar na ultima linha elif x ==",
"#verifica se estar na ultima coluna elif y == (self.dimension-1): self.matrix_perceptions[x][y - 1].append(self.perceptions[obj['name']])",
"1): self.matrix_perceptions[x - 1][y].append(self.perceptions[obj['name']]) # verifica se estar na primeira coluna if y",
"self.dimension = dimension # Array de Coordenadas dos Elementos do Jogo (Poço |",
"if self.matrix[cima[0]][cima[1]] != 'pit': nodes.append(cima) if j == 0: # MAIS A ESQUERDA",
"= self.dimension-1 else: self.coordinate[obj[\"name\"]].append((x,y)) x,y = self.randomCoordinate() self.matrix[x][y] = obj['name'] # Constroi as",
"- 1].append(self.perceptions[obj['name']]) #verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y+1].append(self.perceptions[obj['name']]) self.matrix_perceptions[x][y-1].append(self.perceptions[obj['name']]) #",
"nodes.append(baixo) if j == 0: # MAIS A ESQUERDA (apenas testar a direita)",
"Environment(object): def __init__(self, dimension:int, n_pits:int, n_golds:int=1, n_wumpus:int=1): # Itens Possíves de Perceber no",
"in range(dimension)] self.matrix[0][0] = 'start' self.matrix_perceptions = [[ [] for column in range(dimension)]",
"1].append(self.perceptions[obj['name']]) # verifica se estar nas colunas do meio else: self.matrix_perceptions[x][y + 1].append(self.perceptions[obj['name']])",
"verifica se estar nas linhas do meio else: self.matrix_perceptions[x + 1][y].append(self.perceptions[obj['name']]) self.matrix_perceptions[x -",
"-> None: for _ in range(obj['amount']): x = y = 0 if(obj['name'] ==",
"x = y = 0 if(obj['name'] == 'gold'): x = self.dimension-1 y ="
"DetachedAwardFinancialAssistanceFactory from tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset =",
"fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\" det_award =",
"from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory from tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def",
"except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors",
"expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert expected_subset ==",
"import DetachedAwardFinancialAssistanceFactory from tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset",
"_FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual =",
"database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors == 0 def test_failure(database): \"\"\"",
"= DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2]) assert errors == 2",
"DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3,",
"assert expected_subset == actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required for all",
"submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='')",
"delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors =",
"= DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2])",
"DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4",
"AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c',",
"database)) assert expected_subset == actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required for",
"= DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for D records",
"for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 =",
"awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4,",
"= DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors =",
"records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE,",
"= DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None)",
"for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 =",
"= 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE,",
"required for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2",
"actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete",
"errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors == 0",
"awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors ==",
"= set(query_columns(_FILE, database)) assert expected_subset == actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is",
"is required for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None)",
"assert errors == 0 def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for",
"tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory from tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database):",
"det_award_4, det_award_5]) assert errors == 0 def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is",
"DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2]) assert",
"submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name')",
"\"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE, database,",
"== actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required for all submissions except",
"records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name')",
"det_award_5]) assert errors == 0 def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required",
"DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors",
"det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d',",
"all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None,",
"= DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2,",
"test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\"",
"\"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\"",
"Test ignoring for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='')",
"test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records.",
"det_award_2, det_award_3, det_award_4, det_award_5]) assert errors == 0 def test_failure(database): \"\"\" Test fail",
"awardee_or_recipient_legal='Name') # Test ignoring for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 =",
"'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert expected_subset == actual def test_success(database):",
"\"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for",
"ignoring for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5",
"# Test ignoring for D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D',",
"det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2]) assert errors ==",
"query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual",
"awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database,",
"'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert expected_subset == actual def test_success(database): \"\"\"",
"actual = set(query_columns(_FILE, database)) assert expected_subset == actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName",
"for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 =",
"awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2]) assert errors",
"det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5])",
"== 0 def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions",
"Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\" det_award =",
"set(query_columns(_FILE, database)) assert expected_subset == actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required",
"errors == 0 def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for all",
"{'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert expected_subset == actual def",
"det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award,",
"required for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2",
"test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert expected_subset",
"= DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert",
"Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\" det_award",
"import number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr',",
"all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='',",
"records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring",
"det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None) det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='') errors = number_of_errors(_FILE, database, models=[det_award,",
"det_award_3, det_award_4, det_award_5]) assert errors == 0 def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName",
"\"\"\" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\" det_award",
"AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C',",
"DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors = number_of_errors(_FILE,",
"= number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors == 0 def",
"is required for all submissions except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED')",
"awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for D records det_award_3 =",
"def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete",
"def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert",
"expected_subset == actual def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required for all submissions",
"delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test",
"models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors == 0 def test_failure(database): \"\"\" Test",
"'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert expected_subset == actual def test_success(database): \"\"\" Test",
"D records det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d',",
"def test_success(database): \"\"\" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records.",
"except delete records. \"\"\" det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') #",
"number_of_errors(_FILE, database, models=[det_award, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors == 0 def test_failure(database):",
"number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'}",
"tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal',",
"'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database))",
"DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for D records det_award_3",
"= {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr', 'uniqueid_AssistanceTransactionUniqueKey'} actual = set(query_columns(_FILE, database)) assert expected_subset == actual",
"det_award_3 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None) det_award_4 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='') det_award_5 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name') errors",
"0 def test_failure(database): \"\"\" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except",
"from tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'fabsreq9_detached_award_financial_assistance' def test_column_headers(database): expected_subset = {'row_number',",
"det_award = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED') det_award_2 = DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name') # Test ignoring for D"
] |
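The SQL rule file the tests load (`fabsreq9_detached_award_financial_assistance`) is not shown here, but the check the fixtures encode is small enough to state directly. As a rough illustration only — a hypothetical predicate, not the broker's actual SQL — the rule reduces to "the name is required unless the record is a delete ('D', case-insensitive)":

def fabsreq9_fails(correction_delete_indicatr, awardee_or_recipient_legal):
    """Illustrative predicate (hypothetical helper, not part of the broker code).

    AwardeeOrRecipientLegalEntityName is required for every submission except
    delete records, i.e. those with correction_delete_indicatr 'D' or 'd'.
    """
    is_delete = (correction_delete_indicatr or '').upper() == 'D'
    has_name = bool(awardee_or_recipient_legal)
    return not is_delete and not has_name


# Mirrors the factories above: non-delete records without a name fail,
# delete records are ignored regardless of the name field.
assert fabsreq9_fails('c', None) is True
assert fabsreq9_fails(None, '') is True
assert fabsreq9_fails('d', None) is False
assert fabsreq9_fails('D', '') is False
assert fabsreq9_fails('C', 'REDACTED') is False

This is why test_success mixes 'd'/'D' records with empty names into the passing set: the indicator, not the name, decides whether the rule applies.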
# Copyright © 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the ProgramAccount end-point.

Test-Suite to ensure that the /programAccount endpoint is working as expected.
"""
from tests import oracle_integration


@oracle_integration
def test_get_program_account_no_results(client):
    """Assert that requesting a program account that does not exist returns a 404 with a message."""
    rv = client.get('/api/v1/programAccount/FM0000001/BNTZLDLBBE3')

    assert 404 == rv.status_code
    assert None is not rv.json['message']
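Only the not-found path is covered above. A happy-path counterpart would follow the same shape: hit the endpoint through the Flask test client and assert on status and body. A minimal sketch, assuming the Oracle fixture is seeded with a matching record — the identifier pair below is a placeholder, not a known fixture:

from tests import oracle_integration


@oracle_integration
def test_get_program_account_found(client):
    """Hypothetical happy-path sketch; identifiers are placeholders, not real fixtures."""
    rv = client.get('/api/v1/programAccount/CP0000001/EXAMPLEKEY')

    # With a seeded record the endpoint should return the account payload.
    assert 200 == rv.status_code
    assert rv.json is not None

The @oracle_integration marker gates the test so it only runs when the Oracle-backed integration environment is available, which is why both tests carry it.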