content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Generated by Django 3.0.5 on 2020-04-16 08:23
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
20,
319,
12131,
12,
3023,
12,
1433,
8487,
25,
1954,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlPLCModelnaam(KeuzelijstField):
"""De modelnaam van de PLC."""
naam = 'KlPLCModelnaam'
label = 'PLC model'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlPLCModelnaam'
definition = 'De modelnaam van de PLC.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlPLCModelnaam'
options = {
}
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,
13,
27354,
265,
9497,
13,
8896,
10277,
417,
2926,
301,
15878,
1330,
3873,
10277,
417,
2926,
301,
15878,
628,
198,
2,
2980,
515,
351,
21676,
2538,
77,... | 2.366071 | 224 |
from multidict import MultiDict
| [
6738,
1963,
312,
713,
1330,
15237,
35,
713,
628,
628
] | 3.5 | 10 |
import json
import random
import datetime
import threading
import time
import traceback
from flask import redirect, render_template
from flask.ext.classy import FlaskView, route
import inflect
from app import app, db
from app.models import *
from app.forms import SimpleMturkForm
from mturk import grant_qualification
from mturk import manage_hits
import config
# inflect_eng = inflect.engine()
# _quiz_threshold = 0.8
@app.route('/')
@app.route('/index')
WorkerView.register(app)
AnnView.register(app)
TaskView.register(app)
def add_attribute_annotations(data, hit_id, job_id):
'''
Takes list of attribute annotations, format: [{ image_id: xxx, patch_id: xxx, label_id: xxx, value: t/f }, ... ]
Adds annotations to annotations table with reference to corresponding hit_id
'''
print 'adding annotation'
print 'adding annotation hit_id: '+str(hit_id)
# print data
print 'number of annotations from this hit (should be 200): %d' % len(data)
# todo: make this one insert statement
stmt = 'insert into annotation (value, patch_id, image_id, label_id, hit_id) values'
rows = []
for item in data:
# add annotation row
rows.append('(%r, %d, %d, %d, %d)' % (item["value"], item["patch_id"], item["image_id"], item["label_id"], hit_id))
hd = HitDetails.query.filter(HitDetails.image_id == item["image_id"]).filter(HitDetails.patch_id == item["patch_id"]).filter(HitDetails.label_id == item["label_id"]).filter(HitDetails.job_id == job_id).first()
if hd:
if hd.hits:
hd.hits = ', '.join([hd.hits, str(hit_id)])
else:
hd.hits = str(hit_id)
hd.num_hits = hd.num_hits + 1
stmt += ', '.join(rows)
db.engine.execute(stmt)
db.session.commit()
print '*** num active threads %s ***' % str(threading.active_count())
return True
| [
11748,
33918,
198,
11748,
4738,
198,
11748,
4818,
8079,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
12854,
1891,
198,
198,
6738,
42903,
1330,
18941,
11,
8543,
62,
28243,
198,
6738,
42903,
13,
2302,
13,
4871,
88,
1330,
46947,
768... | 2.537135 | 754 |
import os
import shutil
import unittest
from base64 import b64encode
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempFile
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from toil.job import Job
from toil.common import Toil
from cactus.shared.common import cactus_call, ChildTreeJob
if __name__ == '__main__':
unittest.main()
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
555,
715,
395,
198,
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
198,
198,
6738,
3367,
25835,
13,
65,
952,
952,
1330,
6208,
19580,
198,
6738,
3367,
25835,
13,
65,
952,
952,
1330,
651,
... | 3.073171 | 123 |
import factory
from tests.datasets import factories as datasets_factoryboy
from wazimap_ng.profile import models
from django.core.files.base import ContentFile
| [
11748,
8860,
198,
198,
6738,
5254,
13,
19608,
292,
1039,
1330,
17590,
355,
40522,
62,
69,
9548,
7081,
198,
6738,
266,
1031,
320,
499,
62,
782,
13,
13317,
1330,
4981,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
8692,
1330,
... | 3.469388 | 49 |
#!/usr/bin/python
import UniversalMolecularSystem as UMS
import sys
UMS.MainAsMol22XYZ(len(sys.argv),sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
14499,
44,
2305,
10440,
11964,
355,
471,
5653,
198,
11748,
25064,
198,
198,
52,
5653,
13,
13383,
1722,
44,
349,
1828,
34278,
57,
7,
11925,
7,
17597,
13,
853,
85,
828,
17597,
13,
853,... | 2.434783 | 46 |
from sacred import Experiment
from sacred.utils import apply_backspaces_and_linefeeds
from experiments.utils import get_mongo_observer
from experiments.evaluation import import_weights_into_network
from xview.datasets import get_dataset
from xview.models import get_model
import numpy as np
from os import path, mkdir
from copy import deepcopy
ex = Experiment()
# reduce output of progress bars
ex.captured_out_filter = apply_backspaces_and_linefeeds
ex.observers.append(get_mongo_observer())
@ex.automain
| [
6738,
13626,
1330,
29544,
198,
6738,
13626,
13,
26791,
1330,
4174,
62,
1891,
2777,
2114,
62,
392,
62,
1370,
12363,
82,
198,
6738,
10256,
13,
26791,
1330,
651,
62,
76,
25162,
62,
672,
15388,
198,
6738,
10256,
13,
18206,
2288,
1330,
133... | 3.355263 | 152 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth import authenticate
from spackmon.apps.users.models import User
from getpass import getpass
class Command(BaseCommand):
"""add a user (typically to use the API) without any special permissions."""
help = "Get a user token to interact with the API"
| [
2,
15069,
2211,
12,
1238,
2481,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 3.605263 | 152 |
import itertools
f = open("day9.txt","r")
XMAS = f.readlines()
#print(getFirstWeakness(7, XMAS))
print(getFirstWeakness(25, XMAS))
#preamble is first 25
#test preamble is first 5
#part2
#number is 23278925
#print(contiguous_set(127, XMAS))
print(max(contiguous_set(23278925, XMAS)) + min(contiguous_set(23278925, XMAS))) | [
11748,
340,
861,
10141,
198,
69,
796,
1280,
7203,
820,
24,
13,
14116,
2430,
81,
4943,
198,
55,
31180,
796,
277,
13,
961,
6615,
3419,
198,
2,
4798,
7,
1136,
5962,
44898,
1108,
7,
22,
11,
1395,
31180,
4008,
198,
4798,
7,
1136,
5962,... | 2.48062 | 129 |
from wbb import app
from wbb import app2
from pyrogram import filters
import bs4
import aiohttp
import requests
_MODULE_ = "Apps"
_HELP_ = """To search an app on playstore"""
@app.on_message(filters.command("ply"))
| [
6738,
266,
11848,
1330,
598,
198,
6738,
266,
11848,
1330,
598,
17,
198,
6738,
12972,
39529,
1330,
16628,
198,
11748,
275,
82,
19,
198,
11748,
257,
952,
4023,
198,
11748,
7007,
198,
198,
62,
33365,
24212,
62,
796,
366,
48433,
1,
198,
... | 2.722892 | 83 |
import sys
from vacs.models import Command, Experiment, Vac, Evaluation, Assignment, Participant, Score, ValAssignment, Validation
from django.contrib.auth import get_user_model
import numpy as np
from scipy.misc import comb
import math
# Exclude ids that were not recoverable from the db
full_exclude_id = [2657, 2662, 2666, 2667, 2668, 2672, 2709, 2735, 2737, 2741, 2758, 2784, 2805, 2827, 2844, 2877, 2886, 2920, 2923, 2924, 2927, 2944, 2953, 2971, 3004, 3008, 3012, 3021]
exclude_id = [2657, 2662, 2666, 2668, 2709, 2735, 2737, 2741, 2805, 2827, 2844, 2877, 2886, 2923, 2971, 3012]
# Get all the Validation Assignments for the experiment
experiment_id = 77
all_val_assignments = ValAssignment.objects.filter(
user__participant__experiment__pk=experiment_id).exclude(id__in=exclude_id)
full_selections = [[0,0] for i in range(6)]
full_lax_selections = [[0,0] for i in range(6)]
final_step_selection = 0
final_lax_step_selection = 0
soft_final_step_selection = 0
hard_final_step_selection = 0
soft_final_lax_step_selection= 0
hard_final_lax_step_selection= 0
hard_judge_selections = [[0,0] for i in range(6)]
soft_judge_selections = [[0,0] for i in range(6)]
soft_lax_selections = [[0,0] for i in range(6)]
hard_lax_selections = [[0,0] for i in range(6)]
judge_dividing_factor = 1
avg_first_size =[]
avg_selection_step =[]
for assignment in all_val_assignments:
scores = assignment.evaluated_scores.all()
# Get all the validations for the scores
validations = Validation.objects.filter(score__in=scores)\
.order_by('last_updated').reverse()
all_lexicons = [map(int, validation.selected_lexicons[:-1].split('.')) for validation in validations]
all_lexicons.sort(key=len,reverse=True)
flat_lexicons = [item for sublist in all_lexicons for item in sublist]
# Get all the validations for the scores for the lax step
all_lax_lexicons = [map(int, validation.selected_lexicons[:-1].split('.'))+[validation.pk] for validation in validations]
all_lax_lexicons.sort(key=len,reverse=True)
####################################################
################## WITH HARD STEPS #################
####################################################
# Get the avg selection step
avg_selection_step.append(len(all_lexicons))
# Get the avg number of elements in the first selection
if len(all_lexicons) > 1:
avg_first_size.append(len(all_lexicons[0]))
#####################################
###### FOR ALL THE JUDGES ###########
#####################################
# Get all the selections per step
lexicon_index = 0
for lexicon in all_lexicons:
if assignment.lexicon_number in lexicon:
full_selections[lexicon_index][0] += 1
full_selections[lexicon_index][1] += 1
lexicon_index += 1
# Get selected in the last step
if assignment.lexicon_number in all_lexicons[-1]:
final_step_selection += 1
#####################################
###### DIVIDED BY JUDGE CRITERIA ####
#####################################
# Soft judges
if len(all_lexicons) > judge_dividing_factor:
# Get all the selections per step
lexicon_index = 0
for lexicon in all_lexicons:
if assignment.lexicon_number in lexicon:
soft_judge_selections[lexicon_index][0] += 1
soft_judge_selections[lexicon_index][1] += 1
lexicon_index += 1
# Get selected in the last step
if assignment.lexicon_number in all_lexicons[-1]:
soft_final_step_selection += 1
# harsh judges
else:
# Get all the selections per step
lexicon_index = 0
for lexicon in all_lexicons:
if assignment.lexicon_number in lexicon:
hard_judge_selections[lexicon_index][0] += 1
hard_judge_selections[lexicon_index][1] += 1
lexicon_index += 1
# Get selected in the last step
if assignment.lexicon_number in all_lexicons[-1]:
hard_final_step_selection += 1
####################################################
################## WITH LAX STEPS ##################
####################################################
lax_step = 3
#####################################
###### FOR ALL THE JUDGES ###########
#####################################
# Get all the selections per step
lexicon_index = 0
for lexicon in all_lax_lexicons:
# Get the next 3 closest to the assigned
val = Validation.objects.get(pk=lexicon[-1])
val_score = val.score
all_lexicon_scores = Score.objects.filter(
experiment=val_score.experiment,
vac=val_score.vac,
command=val_score.command)
for s in all_lexicon_scores:
s.diff_score = abs(s.score-val_score.score)
sorted_scores = sorted(list(all_lexicon_scores), key=lambda s:s.diff_score)
lax_set = set([s.lexicon_number for s in sorted_scores[:lax_step+1]])
if bool(lax_set.intersection(set(lexicon[:-1]))):
full_lax_selections[lexicon_index][0] += 1
full_lax_selections[lexicon_index][1] += 1
lexicon_index += 1
# Get selected in the last step
# Get the next 3 closest to the assigned
val = Validation.objects.get(pk=all_lax_lexicons[-1][-1])
val_score = val.score
all_lexicon_scores = Score.objects.filter(
experiment=val_score.experiment,
vac=val_score.vac,
command=val_score.command)
for s in all_lexicon_scores:
s.diff_score = abs(s.score-val_score.score)
sorted_scores = sorted(list(all_lexicon_scores), key=lambda s:s.diff_score)
lax_set = set([s.lexicon_number for s in sorted_scores[:lax_step+1]])
if bool(lax_set.intersection(set(all_lax_lexicons[-1][:-1]))):
final_lax_step_selection += 1
#####################################
###### DIVIDED BY JUDGE CRITERIA ####
#####################################
# Soft judges
if len(all_lexicons) > judge_dividing_factor:
# Get all the selections per step
lexicon_index = 0
for lexicon in all_lax_lexicons:
# Get the next 3 closest to the assigned
val = Validation.objects.get(pk=lexicon[-1])
val_score = val.score
all_lexicon_scores = Score.objects.filter(
experiment=val_score.experiment,
vac=val_score.vac,
command=val_score.command)
for s in all_lexicon_scores:
s.diff_score = abs(s.score-val_score.score)
sorted_scores = sorted(list(all_lexicon_scores), key=lambda s:s.diff_score)
lax_set = set([s.lexicon_number for s in sorted_scores[:lax_step+1]])
if bool(lax_set.intersection(set(lexicon[:-1]))):
soft_lax_selections[lexicon_index][0] += 1
soft_lax_selections[lexicon_index][1] += 1
lexicon_index += 1
# Get selected in the last step
# Get the next 3 closest to the assigned
val = Validation.objects.get(pk=all_lax_lexicons[-1][-1])
val_score = val.score
all_lexicon_scores = Score.objects.filter(
experiment=val_score.experiment,
vac=val_score.vac,
command=val_score.command)
for s in all_lexicon_scores:
s.diff_score = abs(s.score-val_score.score)
sorted_scores = sorted(list(all_lexicon_scores), key=lambda s:s.diff_score)
lax_set = set([s.lexicon_number for s in sorted_scores[:lax_step+1]])
if bool(lax_set.intersection(set(all_lax_lexicons[-1][:-1]))):
soft_final_lax_step_selection += 1
# harsh judges
else:
# Get all the selections per step
# Get all the selections per step
lexicon_index = 0
for lexicon in all_lax_lexicons:
# Get the next 3 closest to the assigned
val = Validation.objects.get(pk=lexicon[-1])
val_score = val.score
all_lexicon_scores = Score.objects.filter(
experiment=val_score.experiment,
vac=val_score.vac,
command=val_score.command)
for s in all_lexicon_scores:
s.diff_score = abs(s.score-val_score.score)
sorted_scores = sorted(list(all_lexicon_scores), key=lambda s:s.diff_score)
lax_set = set([s.lexicon_number for s in sorted_scores[:lax_step+1]])
if bool(lax_set.intersection(set(lexicon[:-1]))):
hard_lax_selections[lexicon_index][0] += 1
hard_lax_selections[lexicon_index][1] += 1
lexicon_index += 1
# Get selected in the last step
# Get the next 3 closest to the assigned
val = Validation.objects.get(pk=all_lax_lexicons[-1][-1])
val_score = val.score
all_lexicon_scores = Score.objects.filter(
experiment=val_score.experiment,
vac=val_score.vac,
command=val_score.command)
for s in all_lexicon_scores:
s.diff_score = abs(s.score-val_score.score)
sorted_scores = sorted(list(all_lexicon_scores), key=lambda s:s.diff_score)
lax_set = set([s.lexicon_number for s in sorted_scores[:lax_step+1]])
if bool(lax_set.intersection(set(all_lax_lexicons[-1][:-1]))):
hard_final_lax_step_selection += 1
avg_first_size = round(np.mean(avg_first_size),2)
print "Total Selections by step:"
print full_selections
print "Total Selections in the last step:"
print final_step_selection
print "Avg First Step Selection size (if there is more than one step)"
print avg_first_size
print "random chance of choosing the right value on the second step"
print round(comb(8,math.ceil(avg_first_size-1.))/comb(9,math.ceil(avg_first_size))*1.0/math.ceil(avg_first_size),2)
print "Avg selection step"
print round(np.mean(avg_selection_step),2)
print "####################################"
print "Total Soft Selections by step:"
print soft_judge_selections
print "Total Soft Selections in the last step:"
print soft_final_step_selection
print "Total hard Selections by step:"
print hard_judge_selections
print "Total hard Selections in the last step:"
print hard_final_step_selection
print "####################################"
print "Total Lax Selections by step:"
print full_lax_selections
print "Total Selections in the last step:"
print final_lax_step_selection
print "####################################"
print "Total Soft judge lax Selections by step:"
print soft_lax_selections
print "Total Soft judge lax Selections in the last step:"
print soft_final_lax_step_selection
print "Total hard judge lax Selections by step:"
print hard_lax_selections
print "Total hard judge lax Selections in the last step:"
print hard_final_lax_step_selection
| [
11748,
25064,
198,
6738,
6658,
82,
13,
27530,
1330,
9455,
11,
29544,
11,
25709,
11,
34959,
11,
50144,
11,
29880,
11,
15178,
11,
3254,
8021,
16747,
11,
3254,
24765,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220... | 2.416593 | 4,532 |
__version__ = 0.805 | [
834,
9641,
834,
796,
657,
13,
28256
] | 2.714286 | 7 |
from ConfigParser import SafeConfigParser
from datetime import datetime, timedelta
import HTMLParser
import logging, logging.config, re, sys, os
from time import time
from dateutil import parser, rrule, tz
import praw
from requests.exceptions import HTTPError
from sqlalchemy import create_engine
from sqlalchemy import Boolean, Column, DateTime, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
import yaml
# global reddit session
r = None
cfg_file = SafeConfigParser()
path_to_cfg = os.path.abspath(os.path.dirname(sys.argv[0]))
path_to_cfg = os.path.join(path_to_cfg, 'schedulebot.cfg')
cfg_file.read(path_to_cfg)
if cfg_file.get('database', 'system').lower() == 'sqlite':
engine = create_engine(
cfg_file.get('database', 'system')+':///'+\
cfg_file.get('database', 'database'))
else:
engine = create_engine(
cfg_file.get('database', 'system')+'://'+\
cfg_file.get('database', 'username')+':'+\
cfg_file.get('database', 'password')+'@'+\
cfg_file.get('database', 'host')+'/'+\
cfg_file.get('database', 'database'))
Base = declarative_base()
Session = sessionmaker(bind=engine, expire_on_commit=False)
session = Session()
class Subreddit(Base):
"""Table containing the subreddits for the bot to monitor.
name - The subreddit's name. "gaming", not "/r/gaming".
enabled - Subreddit schedule will not be executed if False
schedule_yaml - YAML definition of the subreddit's schedule
updated - Time that the subreddit was last updated (UTC)
"""
__tablename__ = 'schedule'
name = Column(String(100), nullable=False, primary_key=True)
enabled = Column(Boolean, nullable=False, default=True)
schedule_yaml = Column(Text)
updated = Column(DateTime, nullable=False)
def update_from_wiki(subreddit, requester):
"""Updates events from the subreddit's wiki."""
global r
username = cfg_file.get('reddit', 'username')
try:
page = subreddit.get_wiki_page(cfg_file.get('reddit', 'wiki_page_name'))
except Exception:
send_error_message(requester, subreddit.display_name,
'The wiki page could not be accessed. Please ensure the page '
'http://www.reddit.com/r/{0}/wiki/{1} exists and that {2} '
'has the "wiki" mod permission to be able to access it.'
.format(subreddit.display_name,
cfg_file.get('reddit', 'wiki_page_name'),
username))
return False
html_parser = HTMLParser.HTMLParser()
page_content = html_parser.unescape(page.content_md)
# check that all the events are valid yaml
event_defs = yaml.safe_load_all(page_content)
event_num = 1
try:
for event_def in event_defs:
event_num += 1
except Exception as e:
indented = ''
for line in str(e).split('\n'):
indented += ' {0}\n'.format(line)
send_error_message(requester, subreddit.display_name,
'Error when reading schedule from wiki - '
'Syntax invalid in section #{0}:\n\n{1}'
.format(event_num, indented))
return False
# reload and actually process the events
event_defs = yaml.safe_load_all(page_content)
event_num = 1
kept_sections = []
for event_def in event_defs:
# ignore any non-dict sections (can be used as comments, etc.)
if not isinstance(event_def, dict):
continue
event_def = lowercase_keys_recursively(event_def)
try:
check_event_valid(event_def)
event = ScheduledEvent(event_def)
except ValueError as e:
send_error_message(requester, subreddit.display_name,
'Invalid event in section #{0} - {1}'
.format(event_num, e))
return False
event_num += 1
kept_sections.append(event_def)
# Update the subreddit, or add it if necessary
try:
db_subreddit = (session.query(Subreddit)
.filter(Subreddit.name == subreddit.display_name.lower())
.one())
except NoResultFound:
db_subreddit = Subreddit()
db_subreddit.name = subreddit.display_name.lower()
session.add(db_subreddit)
db_subreddit.updated = datetime.utcnow()
db_subreddit.schedule_yaml = page_content
session.commit()
r.send_message(requester,
'{0} schedule updated'.format(username),
"{0}'s schedule was successfully updated for /r/{1}"
.format(username, subreddit.display_name))
return True
def lowercase_keys_recursively(subject):
"""Recursively lowercases all keys in a dict."""
lowercased = dict()
for key, val in subject.iteritems():
if isinstance(val, dict):
val = lowercase_keys_recursively(val)
lowercased[key.lower()] = val
return lowercased
def check_event_valid(event):
"""Checks if an event defined on a wiki page is valid."""
validate_keys(event)
validate_values_not_empty(event)
validate_type(event, 'first', basestring)
validate_type(event, 'repeat', basestring)
validate_type(event, 'rrule', basestring)
validate_type(event, 'title', basestring)
validate_type(event, 'text', basestring)
validate_type(event, 'distinguish', bool)
validate_type(event, 'sticky', bool)
validate_type(event, 'contest_mode', bool)
validate_type(event, 'link_flair_text', basestring)
validate_type(event, 'link_flair_class', basestring)
validate_regex(event, 'repeat', ScheduledEvent.repeat_regex)
def validate_values_not_empty(check):
"""Checks (recursively) that no values in the dict are empty."""
for key, val in check.iteritems():
if isinstance(val, dict):
validate_values_not_empty(val)
elif (val is None or
(isinstance(val, (basestring, list)) and len(val) == 0)):
raise ValueError('`{0}` set to an empty value'.format(key))
def validate_keys(check):
"""Checks if all the keys in the event are valid."""
valid_keys = set(['first', 'rrule', 'title', 'text'])
valid_keys |= set(ScheduledEvent._defaults.keys())
for key in check:
if key not in valid_keys:
raise ValueError('Invalid variable: `{0}`'.format(key))
# make sure that all of the required keys are being set
if ('first' not in check or
'title' not in check or
'text' not in check):
raise ValueError('All the required variables were not set.')
def validate_type(check, key, req_type):
"""Validates that a dict value is of the correct type."""
if key not in check:
return
if req_type == int:
try:
int(str(check[key]))
except ValueError:
raise ValueError('{0} must be an integer'.format(key))
else:
if not isinstance(check[key], req_type):
raise ValueError('{0} must be {1}'.format(key, req_type))
def validate_regex(check, key, pattern):
"""Validates that a dict value matches a regex."""
if key not in check:
return
if not re.match(pattern, check[key]):
raise ValueError('Invalid {0}: {1}'.format(key, check[key]))
def send_error_message(user, sr_name, error):
"""Sends an error message to the user if a wiki update failed."""
global r
r.send_message(user,
'Error updating from wiki in /r/{0}'.format(sr_name),
'### Error updating from [wiki configuration in /r/{0}]'
'(http://www.reddit.com/r/{0}/wiki/{1}):\n\n---\n\n{2}'
.format(sr_name,
cfg_file.get('reddit', 'wiki_page_name'),
error))
def process_messages():
"""Processes the bot's messages looking for invites/commands."""
global r
stop_time = int(cfg_file.get('reddit', 'last_message'))
owner_username = cfg_file.get('reddit', 'owner_username')
new_last_message = None
update_srs = set()
invite_srs = set()
logging.debug('Checking messages')
try:
for message in r.get_inbox():
if int(message.created_utc) <= stop_time:
break
if message.was_comment:
continue
if not new_last_message:
new_last_message = int(message.created_utc)
if message.body.strip().lower() == 'schedule':
# handle if they put in something like '/r/' in the subject
if '/' in message.subject:
sr_name = message.subject[message.subject.rindex('/')+1:]
else:
sr_name = message.subject
if (sr_name.lower(), message.author.name) in update_srs:
continue
try:
subreddit = r.get_subreddit(sr_name)
if (message.author.name == owner_username or
message.author in subreddit.get_moderators()):
update_srs.add((sr_name.lower(), message.author.name))
else:
send_error_message(message.author, sr_name,
'You do not moderate /r/{0}'.format(sr_name))
except HTTPError as e:
send_error_message(message.author, sr_name,
'Unable to access /r/{0}'.format(sr_name))
# do requested updates from wiki pages
updated_srs = []
for subreddit, sender in update_srs:
if update_from_wiki(r.get_subreddit(subreddit),
r.get_redditor(sender)):
updated_srs.append(subreddit)
logging.info('Updated from wiki in /r/{0}'.format(subreddit))
else:
logging.info('Error updating from wiki in /r/{0}'
.format(subreddit))
except Exception as e:
logging.error('ERROR: {0}'.format(e))
raise
finally:
# update cfg with new last_message value
if new_last_message:
cfg_file.set('reddit', 'last_message', str(new_last_message))
cfg_file.write(open(path_to_cfg, 'w'))
if __name__ == '__main__':
main()
| [
6738,
17056,
46677,
1330,
19978,
16934,
46677,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
11532,
46677,
198,
11748,
18931,
11,
18931,
13,
11250,
11,
302,
11,
25064,
11,
28686,
198,
6738,
640,
1330,
640,
198,
... | 2.285558 | 4,563 |
import tensorflow as tf
import numpy as np
import os
import glob
def tf_cov(x):
"""Calculate covariance for x.
Equivalent to np.cov(x.T)
"""
mean_x = tf.reduce_mean(x, axis=0, keepdims=True)
mx = tf.matmul(tf.transpose(mean_x), mean_x)
vx = tf.matmul(tf.transpose(x), x)/tf.cast(tf.shape(x)[0], tf.float32)
cov_xx = vx - mx
return cov_xx
def tf_corrcoef(x):
"""Calculate correlation matrix for x.
Equivalent to np.corrcoef(x.T)
"""
mean, variance = tf.nn.moments(x, [0])
x /= tf.sqrt(variance)
mean_x = tf.reduce_mean(x, axis=0, keepdims=True)
mx = tf.matmul(tf.transpose(mean_x), mean_x)
vx = tf.matmul(tf.transpose(x), x)/tf.cast(tf.shape(x)[0], tf.float32)
corr_xx = vx - mx
return corr_xx
def corr_loss(y_latent_vars):
"""Loss term correlation.
"""
corr = tf_corrcoef(y_latent_vars)
return tf.reduce_sum(tf.abs(corr))
def emd_loss(y_true, y_pred, reduction_axis=None, num_bins=327, **kwargs):
"""Earth Mover Distance between two waveforms
Parameters
----------
y_true : tf.Tensor
A tensorflow tensor defining the true waveform
shape: [batch_size, num_bins]
y_pred : tf.Tensor
A tensorflow tensor defining the true waveform
shape: [batch_size, num_bins]
Returns
-------
tf.tensor
EMD between two waveforms.
Shape: []
"""
y_pred = tf.reshape(y_pred, [-1, num_bins])
y_true = tf.reshape(y_true, [-1, num_bins])
# set first element to 0
emd_list = [tf.zeros_like(y_true[..., 0])]
# walk through 1D histogram
for i in range(num_bins):
P_i = y_true[..., i]
Q_i = y_pred[..., i]
emd_list.append(P_i + emd_list[-1] - Q_i)
# calculate sum
emd_list = tf.stack(emd_list, axis=-1)
emd = tf.reduce_sum(tf.abs(emd_list), axis=reduction_axis)
return emd
def np_emd_loss(y_true, y_pred, reduction_axis=None, num_bins=327, **kwargs):
"""Earth Mover Distance between two waveforms
Parameters
----------
y_true : np.ndarray
A tensorflow tensor defining the true waveform
shape: [batch_size, num_bins]
y_pred : np.ndarray
A tensorflow tensor defining the true waveform
shape: [batch_size, num_bins]
Returns
-------
np.ndarray
EMD between two waveforms.
Shape: []
"""
y_pred = np.reshape(y_pred, [-1, num_bins])
y_true = np.reshape(y_true, [-1, num_bins])
# set first element to 0
emd_list = [np.zeros_like(y_true[..., 0])]
# walk through 1D histogram
for i in range(num_bins):
P_i = y_true[..., i]
Q_i = y_pred[..., i]
emd_list.append(P_i + emd_list[-1] - Q_i)
# calculate sum
emd_list = np.stack(emd_list, axis=-1)
emd = np.sum(np.abs(emd_list), axis=reduction_axis)
return emd
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
15095,
628,
198,
4299,
48700,
62,
66,
709,
7,
87,
2599,
198,
220,
220,
220,
37227,
9771,
3129,
378,
44829,
590,
329,
2124,
13,
62... | 2.113152 | 1,361 |
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from time import sleep
pathWebdriver = 'C:\\Program Files (x86)\chromedriver.exe'
driver = webdriver.Chrome(pathWebdriver)
xubioLogin = 'https://xubio.com/NXV/newLogin'
xubioMayores = 'https://xubio.com/NXV/contabilidad/libro-mayor'
userName = 'administracion@argenbio.org'
userPass = '*#Xubio1977#*'
# Ids de la página de logueo de xubio.com
htmlIdUserName = 'userName'
htmlIdUserPass = 'password'
htmlIdIngresarButton = 'loginbuton'
# Carga de la página xubio.com y toma del control por parte del webdriver
driver.get(xubioLogin)
# Ubicación de los elementos por ids
windowUser = driver.find_element_by_id(htmlIdUserName)
windowPass = driver.find_element_by_id(htmlIdUserPass)
ingresarButton = driver.find_element_by_id(htmlIdIngresarButton)
windowUser.send_keys(userName)
windowPass.send_keys(userPass)
ingresarButton.click()
# Carga de la página https://xubio.com/NXV/contabilidad/libro-mayor y toma del control por parte del webdriver
driver.get(xubioMayores)
# Xpaths de la página de mayores de xubio.com
fechaDia = driver.find_elements_by_name('day').text
fechaMes = driver.find_elements_by_name('month').text
fechaAño = driver.find_elements_by_name('year').text
cuentaContable = driver.find_element_by_xpath('/html/body/div[8]/table/tbody/tr[1]/td/div/table/tbody/tr[6]/td[1]/input')
print(fechaDia)
print(fechaMes)
print(fechaAño)
driver.quit() | [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
9683,
198,
6738,
640,
1330,
3993,
198,
198,
6978,
13908,
26230,
796,
705,
34,
25,
6852,
15167,
13283,
357,
87,
4521,
... | 2.52007 | 573 |
from .wms import WMS
from .session import session
wms = wms.WMS()
# Create a session object accessible via wms.session
wms.session = session(WMS)
| [
6738,
764,
86,
907,
1330,
370,
5653,
198,
6738,
764,
29891,
1330,
6246,
198,
198,
86,
907,
796,
266,
907,
13,
54,
5653,
3419,
198,
2,
13610,
257,
6246,
2134,
9857,
2884,
266,
907,
13,
29891,
198,
86,
907,
13,
29891,
796,
6246,
7,
... | 3.0625 | 48 |
from django.forms.models import BaseModelFormSet
from django.forms.formsets import BaseFormSet
from django.forms.util import ErrorDict
# add some helpful methods to the formset
# add the FormSetMixin to the base FormSet classes
| [
6738,
42625,
14208,
13,
23914,
13,
27530,
1330,
7308,
17633,
8479,
7248,
198,
6738,
42625,
14208,
13,
23914,
13,
23914,
1039,
1330,
7308,
8479,
7248,
198,
6738,
42625,
14208,
13,
23914,
13,
22602,
1330,
13047,
35,
713,
628,
198,
2,
751,... | 3.640625 | 64 |
#!python
# file listtree.py
class ListTree:
'''
Mix-in that returns an __str__ trace of the entire class tree and all
its objects' attrs at and above self; run by print(), str() returns
constructed string; uses __X attr names to avoid impacting clients;
recurses to superclasses explicitly, uses str.format() to clarity
'''
if __name__ == '__main__':
import testmixin
testmixin.tester(ListTree)
| [
2,
0,
29412,
198,
2,
2393,
1351,
21048,
13,
9078,
198,
198,
4871,
7343,
27660,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
15561,
12,
259,
326,
5860,
281,
11593,
2536,
834,
12854,
286,
262,
2104,
1398,
5509,
290,
477,
1... | 2.972603 | 146 |
import pymysql.cursors
| [
11748,
279,
4948,
893,
13976,
13,
66,
1834,
669,
628
] | 2.4 | 10 |
import unittest
import os
import pytest
import distutils.spawn
DLPOLY_FOUND = distutils.spawn.find_executable('DLPOLY.Z')
# needsDLPOLY = unittest.skipIf(not DLPOLY_FOUND, "DLPOLY not available")
needsDLPOLY = pytest.mark.skipif(not DLPOLY_FOUND, reason="DLPOLY not available")
| [
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748,
12972,
9288,
198,
198,
11748,
1233,
26791,
13,
48183,
198,
19260,
45472,
56,
62,
37,
15919,
796,
1233,
26791,
13,
48183,
13,
19796,
62,
18558,
18187,
10786,
19260,
45472,
56,
13,
57,
... | 2.67619 | 105 |
c = str(input('Digite o nome da sua cidade: ')).lower().strip()
#'c' recebe o nome da cidade que são colocadas em letras minúsculas e logo depois é retirado os espaços desnecessários no começo e no fim da cadeia de caracteres
print(f'Sua cidade tem "Santo" no nome? {"santo" in c}')
#O operador 'in' verifica se tem "santo" em 'c'
#ou->
print(f'Sua cidade tem "Santo" no nome? {c[:5] == "santo"}')
#Utilizamos o index para localizar o "santo" na cadeia de caracteres
| [
66,
796,
965,
7,
15414,
10786,
19511,
578,
267,
299,
462,
12379,
424,
64,
269,
312,
671,
25,
705,
29720,
21037,
22446,
36311,
3419,
198,
2,
6,
66,
6,
1407,
1350,
267,
299,
462,
12379,
269,
312,
671,
8358,
264,
28749,
951,
420,
387... | 2.35 | 200 |
"""
Definition for singly-linked list with a random pointer.
class RandomListNode:
def __init__(self, x):
self.label = x
self.next = None
self.random = None
"""
# @param head: A RandomListNode
# @return: A RandomListNode
| [
37811,
198,
36621,
329,
1702,
306,
12,
25614,
1351,
351,
257,
4738,
17562,
13,
198,
4871,
14534,
8053,
19667,
25,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
2124,
2599,
198,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
... | 2.59 | 100 |
from fastapi import FastAPI
import random
app = FastAPI()
@app.get("/api")
@app.get("/api/{name}")
@app.get("/piada") | [
6738,
3049,
15042,
1330,
12549,
17614,
198,
11748,
4738,
198,
198,
1324,
796,
12549,
17614,
3419,
198,
198,
31,
1324,
13,
1136,
7203,
14,
15042,
4943,
628,
198,
31,
1324,
13,
1136,
7203,
14,
15042,
14,
90,
3672,
92,
4943,
628,
198,
... | 2.411765 | 51 |
# For spymaster AI, doesn't need to hide or shuffle words, just define red/blue/neutral/assassin at the start, and put team color in explicitly
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.neighbors import NearestNeighbors
| [
2,
1114,
599,
4948,
1603,
9552,
11,
1595,
470,
761,
284,
7808,
393,
36273,
2456,
11,
655,
8160,
2266,
14,
17585,
14,
29797,
14,
562,
44961,
379,
262,
923,
11,
290,
1234,
1074,
3124,
287,
11777,
198,
11748,
299,
32152,
355,
45941,
19... | 3.628571 | 70 |
# Generated by Django 2.2.11 on 2021-04-18 21:19
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1157,
319,
33448,
12,
3023,
12,
1507,
2310,
25,
1129,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.8 | 30 |
from qtpy import QtCore, QtGui
| [
6738,
10662,
83,
9078,
1330,
33734,
14055,
11,
33734,
8205,
72,
628
] | 2.666667 | 12 |
from . import mlp
#from . import lenet
from . import resnet
#from . import senet
| [
6738,
764,
1330,
25962,
79,
198,
2,
6738,
764,
1330,
18896,
316,
198,
6738,
764,
1330,
581,
3262,
198,
2,
6738,
764,
1330,
3308,
316,
198
] | 3.115385 | 26 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.ads.google_ads.v5.proto.resources import feed_mapping_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_feed__mapping__pb2
from google.ads.google_ads.v5.proto.services import feed_mapping_service_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_feed__mapping__service__pb2
class FeedMappingServiceStub(object):
"""Proto file describing the FeedMapping service.
Service to manage feed mappings.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetFeedMapping = channel.unary_unary(
'/google.ads.googleads.v5.services.FeedMappingService/GetFeedMapping',
request_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_feed__mapping__service__pb2.GetFeedMappingRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_feed__mapping__pb2.FeedMapping.FromString,
)
self.MutateFeedMappings = channel.unary_unary(
'/google.ads.googleads.v5.services.FeedMappingService/MutateFeedMappings',
request_serializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_feed__mapping__service__pb2.MutateFeedMappingsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v5_dot_proto_dot_services_dot_feed__mapping__service__pb2.MutateFeedMappingsResponse.FromString,
)
class FeedMappingServiceServicer(object):
"""Proto file describing the FeedMapping service.
Service to manage feed mappings.
"""
def GetFeedMapping(self, request, context):
"""Returns the requested feed mapping in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateFeedMappings(self, request, context):
"""Creates or removes feed mappings. Operation statuses are
returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
# This class is part of an EXPERIMENTAL API.
class FeedMappingService(object):
"""Proto file describing the FeedMapping service.
Service to manage feed mappings.
"""
@staticmethod
@staticmethod
| [
2,
2980,
515,
416,
262,
308,
49,
5662,
11361,
8435,
17050,
13877,
13,
8410,
5626,
48483,
0,
198,
37811,
11792,
290,
4382,
6097,
11188,
284,
1237,
672,
3046,
12,
23211,
2594,
526,
15931,
198,
11748,
1036,
14751,
198,
198,
6738,
23645,
... | 2.595745 | 1,034 |
import numpy as np
from collections import Counter
from Tree import DecisionTree | [
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
15034,
198,
6738,
12200,
1330,
26423,
27660
] | 5 | 16 |
"""
FCS file reader supporting file format spec 3.0, 3.1.
Data extraction currently supports:
$MODE: (L) List
$DATATYPE: I,F,D
FCS3.0 http://murphylab.web.cmu.edu/FCSAPI/FCS3.html
FCS3.1 https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2892967/
A data set is a (HEADER, TEXT, DATA) group.
Multiple data sets in one file is deprecated.
A keyword is the label of a data field. A keyword-value pair is the label of the
data field with its associated value. Keywords are unique in data sets,
i.e., there are no multiple instances of the same keyword in the data set.
--> keywords == params and are contained with FCSFile.text
Required FCS primary TEXT segment keywords:
$BEGINANALYSIS $BEGINDATA $BEGINSTEXT $BYTEORD $DATATYPE $ENDANALYSIS
$ENDDATA $ENDSTEXT $MODE $NEXTDATA $PAR $TOT $PnB $PnE $PnN $PnR
"""
from itertools import chain
import os
import re
import struct
from xfcs.FCSFile.DataSection import DataSection
from xfcs.FCSFile.Metadata import Metadata
from xfcs.FCSFile import validate
# ------------------------------------------------------------------------------
def filter_numeric(s):
"""If the given string is numeric, return a numeric value for it"""
if s.isnumeric():
return int(s)
else:
try:
fval = float(s)
return fval
except ValueError:
return s
def filter_ascii32(hex_str):
"""If hex string is repetition of '20', return 0 else convert to int"""
hex_char_set = set(hex_str[i*2:i*2+2] for i in range(len(hex_str)//2))
twozero = set(['20'])
if hex_char_set == twozero:
return 0
else:
return int(hex_str, 16)
def channel_name_keywords(meta_keys):
"""Finds any channel name keyword in the form: $PxN.
Yields:
keyword
"""
spxn = re.compile(r'^\$P\d+N$', re.IGNORECASE)
for key in meta_keys:
if spxn.match(key):
yield key
# ------------------------------------------------------------------------------
class FCSFile(object):
"""Instantiates an FCSFile object.
Public Attributes:
version: version ID for FCS file.
name: filename of fcs file.
parentdir: directory containing fcs file.
text: dict containing all Parameter metadata key : value
param_keys: iterable of Parameter keys in order of location in fcs text section
data: Data class instance to access extracted data sets.
Public Methods:
load: Load an FCS file for reading and confirm version id is supported.
load_data: Load Data Section for reading
load_from_csv: Init FCSFile object from csv containing Parameter key, value pairs.
check_file_format: Confirms metadata format.
load_file_spec: Loads all header, text contents into namedtuple.
Confirms if file is supported for data extraction.
has_param: Confirm Parameter key in text section.
param: Retrieve value for given Parameter key.
numeric_param: Force retrieve numeric value for given Parameter key or 0.
set_param: Sets value for given Parameter key within fcs.text.
meta_hash: Generates unique fingerprint based on current Parameter key, value pairs.
NOTE: this does not provide a hash value for the actual file.
"""
def __init__(self, quiet=False):
"""Initialize an FCSFile object.
Attributes:
version: version ID for FCS file.
name: filename of fcs file.
parentdir: directory containing fcs file.
text: dict of text section metadata Parameter key, value pairs.
param_keys: iterable of Parameter keys in order of location in fcs
text section.
spec: namedtuple instance containing all necessary header, text values
to extract and scale parameter data.
data: Data class instance to access extracted data sets.
"""
self.version = None
self.name = ''
self.parentdir = ''
self.valid = False
self.supported_format = False
self._fcs = None
self.__header = None
self.text = {}
self.param_keys = None
self._param_values = None
self.__key_set = {}
self.__n_keys = 0
self._name_id = None
self.spec = None
self.__hashkey = ''
self.__raw_data = None
self.data = None
self.__supp_text = None
self.__analysis = None
self.quiet = quiet
def load(self, fcs_file):
"""Load an FCS file and confirm version id is supported.
Arg:
f: A fcs filepath.
Returns:
f: A file descriptor
Raises:
NotImplementedError: if fcs file format version is not supported
"""
if self._fcs:
self.__init__()
fcs_obj = open(fcs_file, 'rb')
self.parentdir, self.name = os.path.split(os.path.abspath(fcs_file))
version_id = fcs_obj.read(6).decode('utf-8')
if version_id in ('FCS3.0', 'FCS3.1'):
self.version = version_id
self.__load_30(fcs_obj)
else:
raise NotImplementedError('Not able to parse {vid} files'.format(vid=version_id))
self._fcs = fcs_obj
def __load_30(self, fcs_obj):
"""Load an FCS 3.0 file and read text section (metadata).
Arg:
fcs_obj: A file descriptor
"""
fcs_obj.seek(10)
self.__header = {
'text_start': int(fcs_obj.read(8).decode('utf-8')),
'text_end': int(fcs_obj.read(8).decode('utf-8')),
'data_start': int(fcs_obj.read(8).decode('utf-8')),
'data_end': int(fcs_obj.read(8).decode('utf-8')),
'analysis_start': filter_ascii32(fcs_obj.read(8).hex()),
'analysis_end': filter_ascii32(fcs_obj.read(8).hex())}
# Read the TEXT section
fcs_obj.seek(self.__header['text_start'])
text_delimiter = fcs_obj.read(1).decode('utf-8')
_read_len = self.__header['text_end'] - self.__header['text_start'] - 1
tokens = fcs_obj.read(_read_len).decode('utf-8').split(text_delimiter)
# Collect Parameter keys and values for text map
all_keys = tuple(key.strip().upper() for key in tokens[::2])
all_vals = tuple(filter_numeric(val.strip()) for val in tokens[1::2])
self.text = dict(zip(all_keys, all_vals))
self.param_keys = all_keys
self._param_values = all_vals
self.__update_key_set()
self.check_file_format()
# --------------------------------------------------------------------------
def load_data(self, norm_count=False, norm_time=False):
"""Public access point to load and read the data section.
Args:
norm_count: bool - force event count to start at 1.
norm_time: bool - force time to start at 0.
"""
if not self.spec:
self.load_file_spec()
if not (self.__header or self._fcs):
print('>>> No FCS file loaded.')
return
validate.file_format(self.text, self.spec)
if self.spec.datatype == 'I':
self.__read_int_data()
else:
self.__read_float_data()
self._fcs.close()
self.data = DataSection(self.__raw_data, self.spec, norm_count, norm_time)
def __read_float_data(self):
"""Reads fcs $DATATYPE (F|D) - floats (32|64) bit word length"""
data_start, data_end = self.__get_data_seek()
read_len = data_end - data_start
if read_len + 1 == self.spec.data_len:
read_len += 1
self._fcs.seek(data_start)
data_bytes = self._fcs.read(read_len)
float_format = '{}{}'.format(self.spec.byteord, self.spec.datatype.lower())
bytes_to_float = struct.Struct(float_format)
self.__raw_data = tuple(chain.from_iterable(bytes_to_float.iter_unpack(data_bytes)))
def __read_int_data(self):
"""Reads fcs $DATATYPE I - integer data with fixed word length"""
data_start, _ = self.__get_data_seek()
self._fcs.seek(data_start)
nbytes = self.spec.word_len // 8
tot_reads = self.spec.data_len // nbytes
byteord = self.spec.byteord
# transform hex data to separate, numerical entries
bytes_to_int = int.from_bytes
__raw_read = (self._fcs.read(nbytes) for _ in range(tot_reads))
self.__raw_data = tuple(bytes_to_int(n, byteord) for n in __raw_read)
def __get_data_seek(self):
"""Finds data start and end values within either the header or text section"""
data_start = self.__header['data_start']
data_end = self.__header['data_end']
if not (data_start and data_end):
data_start = self.spec.begindata
data_end = self.spec.enddata
return data_start, data_end
# --------------------------------------------------------------------------
def load_from_csv(self, keys_in, param_vals):
"""Initialize an FCSFile text attribute instance using keys, values from
a previously generated csv file. Loads data for:
self.text, self.param_keys, self.__key_set
Args:
keys_in: Parameter keys located in csv file
param_vals: the keys respective values
"""
for param, value in param_vals.items():
self.set_param(param, value)
self.param_keys = tuple(keys_in)
self.__update_key_set()
self.name = self.text.get('SRC_FILE', '')
def meta_hash(self, meta_keys=None):
"""Generates a hash fingerprint for the fcs file based on Parameter keys
and their respective values. Key order is maintained. Accepts an
optional subset of Parameter keys for use in comparing fcs files to
partial data located in an appended csv file.
Arg:
meta_keys: iterable of Parameter keys to use in place of param_keys
Returns:
Calculated hash as str
"""
txt = []
if not meta_keys:
meta_keys = self.param_keys
for param in meta_keys:
if param in ('SRC_DIR', 'SRC_FILE', 'CSV_CREATED'):
continue
txt.extend((param, str(self.text[param])))
return hash(''.join(txt))
@property
def hashkey(self):
"""Creates hash fingerprint using ordered text section keywords and
values for required channel parameter keywords ($PxBENR).
"""
if not self.__hashkey:
ch_key = re.compile(r'^\$P\d+[BENR]$', re.IGNORECASE)
ch_vals = (str(self.text[kw]) for kw in self.param_keys if ch_key.match(kw))
self.__hashkey = hash(''.join(chain.from_iterable((self.param_keys, ch_vals))))
return self.__hashkey
def get_attr_by_channel_name(self, channel_name, attr):
"""Pre-format channel_name to remove spaces and force upper case.
e.g. FL 5 Log --> FL5LOG
"""
if not self._name_id:
self._name_id = {
v.replace(' ','').upper():k[:-1]
for k,v in self.text.items()
if k.startswith('$P') and k.endswith('N')}
spx_id = self._name_id.get(channel_name, '') + attr
return spx_id if self.has_param(spx_id) else ''
def has_param(self, key):
"""Return True if given parameter key is in text section"""
if self.__n_keys != len(self.text):
self.__update_key_set()
return key in self.__key_set
def param_is_numeric(self, param):
"""Return True if param value is numeric"""
return isinstance(self.param(param), (float, int))
def param(self, param):
"""Return the value for the given parameter"""
return self.text.get(param, 'N/A')
def numeric_param(self, param):
"""Return numeric value for the given parameter or zero"""
return self.text.get(param, 0)
def set_param(self, param, value):
"""Set the value of the given parameter"""
if isinstance(value, str) and not value.isalpha():
value = filter_numeric(value)
self.text[param] = value
def __write(self):
"""Write an FCS file (not implemented)"""
raise NotImplementedError("Can't write FCS files yet")
# ------------------------------------------------------------------------------
| [
37811,
198,
220,
220,
220,
376,
7902,
2393,
9173,
6493,
2393,
5794,
1020,
513,
13,
15,
11,
513,
13,
16,
13,
198,
220,
220,
220,
6060,
22236,
3058,
6971,
25,
198,
220,
220,
220,
220,
220,
220,
220,
720,
49058,
25,
357,
43,
8,
734... | 2.341006 | 5,387 |
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.core.serializers import serialize
from .forms import ConfirmedCaseForm, SpaceTimeFormset, ContagionSiteForm
from .models import ConfirmedCase, ContagionSite
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
7295,
13,
46911,
11341,
1330,
11389,
1096,
198,
198,
6738,
764,
... | 3.722222 | 72 |
# Generated by Django 2.1.7 on 2020-02-10 11:42
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
12131,
12,
2999,
12,
940,
1367,
25,
3682,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from algosdk import kmd
from algosdk.wallet import Wallet
from algosdk.v2client import algod
import json
# define sandbox values for kmd client
kmd_address = "http://localhost:4002"
kmd_token = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
# define sandbox values for algod client
algod_address = "http://localhost:4001"
algod_token = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
main() | [
6738,
435,
70,
418,
34388,
1330,
479,
9132,
198,
6738,
435,
70,
418,
34388,
13,
44623,
1330,
37249,
198,
6738,
435,
70,
418,
34388,
13,
85,
17,
16366,
1330,
435,
25344,
198,
11748,
33918,
198,
198,
2,
8160,
35204,
3815,
329,
479,
91... | 3.175182 | 137 |
import logging
from functools import partial
import numpy as np
from ..utils import find_borders, find_inconsistent
np.seterr(invalid='raise')
def find_active_pixels(e_coord, n_coord, e_coord_inc, n_coord_inc, min_inc=1e-6):
"""
Finds active pixels
Returns a boolean array identifying which pixels have increments larger than the tolerance
and within the bounds [0,1] of the frame.
Parameters
----------
e_coord : ndarray
The e coordinates of the pixel
n_coord : ndarray
The n coordinates of the pixel
e_coord_inc : ndarray.
The e coordinate increment of the pixel
n_coord_inc : ndarray
The n coordinate increment of the pixel
min_inc : float
The increment size which defines convergence
Returns
-------
active_pixels : ndarry
The active pixels given as a 1d boolean array
Notes
-----
"""
e_coord_inbound = np.logical_and(e_coord > 0., e_coord < 1.)
n_coord_inbound = np.logical_and(n_coord > 0., n_coord < 1.)
not_converged = np.logical_or(np.abs(e_coord_inc) > min_inc, np.abs(n_coord_inc) > min_inc)
return np.logical_and(np.logical_and(e_coord_inbound, n_coord_inbound), not_converged)
def clip_args(func, arg1, arg2, bounds=(0., 1.)):
"""
Clip the arguments to bounds
Return the results of the function where the clipped arguments have been used.
Arguments below the lower bound are set to the lower bound and the arguments
above the upper bound are set to the upper bound.
Parameters
----------
func : func(arg1,arg2)
The function which the clipped arguments are passed to
arg1 : ndarray
1D array with floats.
arg2 : ndarray.
1D array with floats.
bounds : tuple, optional
The bounds that the arguments are limited to.
Returns
-------
clipped_func : func(arg1_clipped,arg2_clipped)
The results of the function where the clipped agruments have been applied.
Notes
-----
This function does not do any type of typechecking
"""
upper_bound = bounds[1]
lower_bound = bounds[0]
arg1_inbound = arg1.copy()
arg2_inbound = arg2.copy()
arg1_inbound[arg1 < lower_bound] = lower_bound
arg1_inbound[arg1 > upper_bound] = upper_bound
arg2_inbound[arg2 < lower_bound] = lower_bound
arg2_inbound[arg2 > upper_bound] = upper_bound
return func(arg1_inbound, arg2_inbound)
def identify_pixels_within_frame(xnod, ynod, elm, over_sampling=1.1):
"""
Identify pixels covered by an element frame.
Returns the coordinates of the covered pixels in the image frame and an estimate of the coordinates
in the element frame.
This is done by evaluating the element shape functions on a denser grid than the
image grid, rounds the indices to nearest integer and removes duplicates.
The element cordinates to the corresponding pixels is then obtained from the same mask.
Parameters
----------
xnod : ndarray
1D array with floats.
The x coordinates of the control points.
ynod : ndarray
1D array with floats.
The y coordinates of the control points.
elm : interpolator object.
The interpolator object provides the shape functions used to calculate the coordinates within the element.
over_sampling : float, optional
The degree of oversampling used to find the pixels.
Returns
-------
pixel_x : ndarray
The x-coordinates of the pixels covered by the element
pixel_y : ndarray
The y-coordinates of the pixels covered by the element
pixel_es : ndarray
The elemental e-coordinates of the pixels covered by the element
pixel_ns : ndarray
The elemental n-coordinates of the pixels covered by the element
Notes
-----
There is no guarantee that all pixels are found, so when in doubt, increase the over_sampling factor.
"""
x_min, x_max = find_borders(xnod)
y_min, y_max = find_borders(ynod)
# Calculate coordinates (e,n) covered by the element on a fine grid
n_search_pixels = np.int(over_sampling * max((x_max - x_min), y_max - y_min))
es, ns = np.meshgrid(np.linspace(0., 1., n_search_pixels), np.linspace(0., 1., n_search_pixels))
es = es.flatten()
ns = ns.flatten()
pixel_xs = np.dot(elm.Nn(es, ns), xnod)
pixel_ys = np.dot(elm.Nn(es, ns), ynod)
pixel_xs_closest = np.around(pixel_xs).astype(np.int)
pixel_ys_closest = np.around(pixel_ys).astype(np.int)
xs_ys = np.stack([pixel_xs_closest, pixel_ys_closest], axis=0)
xs_ys_unique, unique_inds = np.unique(xs_ys, return_index=True, axis=1)
pixel_x = xs_ys_unique[0, :].astype(np.float64)
pixel_y = xs_ys_unique[1, :].astype(np.float64)
pixel_es = es[unique_inds].astype(np.float64)
pixel_ns = ns[unique_inds].astype(np.float64)
return pixel_x, pixel_y, pixel_es, pixel_ns
def find_covered_pixel_blocks(node_x, node_y, elm,xs=None,ys=None,keep_all=False, max_iter=200, block_size=1e7, tol=1.e-6):
"""
Find element coordinates to all pixels covered by the element.
Returns the coordinates of the covered pixels in the image coordinates and in the element coordinates.
This is done by first identifiying the pixels within the frame and then finding the corresponding
element coordinates by using a modified Newton-Raphson scheme. For reduced memory usage, the
image covered by the element is subdivided into blocks.
Parameters
----------
node_x : ndarray
1D array with floats.
The x coordinates of the control points.
node_y : ndarray
1D array with floats.
The y coordinates of the control points.
elm : interpolator object.
The interpolator object provides the shape functions used to calculate the coordinates within the element.
max_iter : int, optional
The maximum allowed number of iterations
block_size :int, optional
The maximum number of elements in each block
The number of elements are N-pixels X N-Control points
tol : float, optional
The convergence criteria
Returns
-------
pixel_x : ndarray
The x-coordinates of the pixels covered by the element
pixel_y : ndarray
The y-coordinates of the pixels covered by the element
pixel_es : ndarray
The elemental e-coordinates of the pixels covered by the element
pixel_ns : ndarray
The elemental n-coordinates of the pixels covered by the element
Notes
-----
"""
logger = logging.getLogger(__name__)
# e and n are element coordinates
found_e = []
founc_n = []
# x and y are the corresponding image coordinates
found_x = []
found_y = []
# These are just estimates
if xs is not None and ys is not None:
pix_Xs, pix_Ys = xs,ys
pix_es = (pix_Xs-np.min(pix_Xs))/(np.max(pix_Xs)-np.min(pix_Xs))
pix_ns = (pix_Ys-np.min(pix_Ys))/(np.max(pix_Ys)-np.min(pix_Ys))
else:
pix_Xs, pix_Ys, pix_es, pix_ns = identify_pixels_within_frame(node_x, node_y, elm)
# Split into blocks
n_pix_in_block = block_size / np.float(len(node_x))
num_blocks = np.ceil(len(pix_es) / n_pix_in_block).astype(np.int)
logger.info("Splitting in %s blocks:" % str(num_blocks))
pix_e_blocks = np.array_split(pix_es, num_blocks)
pix_n_blocks = np.array_split(pix_ns, num_blocks)
pix_X_blocks = np.array_split(pix_Xs, num_blocks)
pix_Y_blocks = np.array_split(pix_Ys, num_blocks)
for block_id in range(num_blocks):
e_coord = pix_e_blocks[block_id]
n_coord = pix_n_blocks[block_id]
X_coord = pix_X_blocks[block_id]
Y_coord = pix_Y_blocks[block_id]
# Empty increment vectors
n_coord_inc = np.zeros_like(n_coord)
e_coord_inc = np.zeros_like(e_coord)
# Pre-calculate the gradients. This results in a modified Newton scheme
dxNn = clip_args(elm.dxNn, e_coord, n_coord)
dyNn = clip_args(elm.dyNn, e_coord, n_coord)
for i in range(max_iter):
Nn = clip_args(elm.Nn, e_coord, n_coord)
n_coord_inc[:] = (Y_coord - np.dot(Nn, node_y) - np.dot(dxNn, node_y) * (
X_coord - np.dot(Nn, node_x)) / (np.dot(dxNn, node_x))) / (
np.dot(dyNn, node_y) - np.dot(dxNn, node_y) * np.dot(
dyNn, node_x) / np.dot(dxNn, node_x))
e_coord_inc[:] = (X_coord - np.dot(Nn, node_x) - np.dot(dxNn, node_x) *
n_coord_inc) / np.dot(dxNn, node_x)
e_coord[:] += e_coord_inc
n_coord[:] += n_coord_inc
active_pixels = find_active_pixels(e_coord, n_coord, e_coord_inc, n_coord_inc, tol)
if not np.any(active_pixels):
logger.info('Pixel coordinates found in %i iterations', i)
if keep_all:
epE_block, nyE_block, Xe_block, Ye_block = e_coord, n_coord, X_coord, Y_coord
else:
epE_block, nyE_block, Xe_block, Ye_block = map(
partial(np.delete, obj=find_inconsistent(e_coord, n_coord)),
[e_coord, n_coord, X_coord, Y_coord])
found_e.append(epE_block)
founc_n.append(nyE_block)
found_x.append(Xe_block.astype(np.int))
found_y.append(Ye_block.astype(np.int))
break
if (i + 1) == max_iter:
raise RuntimeError("Did not converge in %i iterations" % max_iter)
return found_e, founc_n, found_x, found_y
def generate_reference(nodal_position, mesh, image, settings, image_id=None):
"""
Generates a Reference object
The Reference object contains all internals that will be used during the correlation procedure.
Parameters
----------
nodal_position : ndarray
2D array with floats.
The coordinates of the control points.
mesh : Mesh object
Mesh definitions
image : ndarray
image as an 2d array
image_id : int, optional
The image id, stored for further reference
Returns
-------
reference : Reference
The Reference object
Notes
-----
The current implementation is slow but not very memory intensive.
Theory
-----
"""
logger = logging.getLogger()
elm = mesh.element_def
node_xs = nodal_position[0]
node_ys = nodal_position[1]
img_grad = np.gradient(image)
try:
pixel_e_blocks, pixel_n_blocks, pixel_x_blocks, pixel_y_blocks = find_covered_pixel_blocks(node_xs,
node_ys,
elm,
block_size=settings.block_size)
num_blocks = len(pixel_e_blocks)
num_pixels = np.sum([block.size for block in pixel_e_blocks])
K = np.zeros((2 * mesh.n_nodes, num_pixels), dtype=settings.precision)
A = np.zeros((mesh.n_nodes * 2, mesh.n_nodes * 2), dtype=settings.precision)
img_covered = image[np.concatenate(pixel_y_blocks), np.concatenate(pixel_x_blocks)]
# Calculate A = B^T * B
for block_id in range(num_blocks):
block_len = pixel_e_blocks[block_id].shape[0]
B = np.zeros((block_len, 2 * mesh.n_nodes), dtype=settings.precision)
# Weight the image gradients with the value of the shape functions
B[:, :elm.n_nodes] = (
img_grad[1][pixel_y_blocks[block_id], pixel_x_blocks[block_id]][:, np.newaxis] * elm.Nn(
pixel_e_blocks[block_id], pixel_n_blocks[block_id]))
B[:, elm.n_nodes:] = (
img_grad[0][pixel_y_blocks[block_id], pixel_x_blocks[block_id]][:, np.newaxis] * elm.Nn(
pixel_e_blocks[block_id], pixel_n_blocks[block_id]))
A += np.dot(B.transpose(), B)
pixel_ind = 0
pixel_ind_last = 0
# Determine K
for block_id in range(num_blocks):
block_len = pixel_e_blocks[block_id].shape[0]
B = np.zeros((2 * mesh.n_nodes, block_len), dtype=settings.precision)
pixel_ind += block_len
# Weight the image gradients with the value of the shape functions
# TODO: This operation is duplicate
B[:elm.n_nodes, :] = (
img_grad[1][pixel_y_blocks[block_id], pixel_x_blocks[block_id]][:, np.newaxis] * elm.Nn(
pixel_e_blocks[block_id], pixel_n_blocks[block_id])).transpose()
B[elm.n_nodes:, :] = (
img_grad[0][pixel_y_blocks[block_id], pixel_x_blocks[block_id]][:, np.newaxis] * elm.Nn(
pixel_e_blocks[block_id], pixel_n_blocks[block_id])).transpose()
K_block = np.linalg.solve(A, B)
K[:, pixel_ind_last:pixel_ind] = K_block
pixel_ind_last = pixel_ind
# Remove for reduced memory usage
del B, K_block
Nn = elm.Nn(np.concatenate(pixel_e_blocks), np.concatenate(pixel_n_blocks)).transpose()
pixel_es = np.concatenate(pixel_e_blocks)
pixel_ns = np.concatenate(pixel_n_blocks)
except Exception as e:
logger.exception(e)
raise RuntimeError('Failed to generate reference')
return Reference(Nn, img_covered, K, None, num_pixels, pixel_es, pixel_ns,
image_id=image_id)
| [
11748,
18931,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
11485,
26791,
1330,
1064,
62,
65,
6361,
11,
1064,
62,
1939,
684,
7609,
198,
198,
37659,
13,
82,
2357,
81,
7,
259,
12102,
... | 2.226102 | 6,214 |
import os
class Config:
'''
General configuration parent class
'''
# SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://qyunky:Lewis860@localhost/blog'
SECRET_KEY = os.environ.get('SECRET_KEY')
QUOTES_API_BASE_URL = ' http://quotes.stormconsultancy.co.uk/popular.json'
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class prodConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
DEBUG = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
config_options = {
'development':DevConfig,
'production':prodConfig
} | [
11748,
28686,
198,
4871,
17056,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
3611,
8398,
2560,
1398,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
1303,
16363,
1847,
3398,
3620,
56,
62,
35,
1404,
6242,
11159,
62,
4726... | 2.578947 | 399 |
# Driver code
n=raw_input('Enter size : ')
arr=[]
for i in range(0,n):
arr.append(input('enter element'))
print(firstNR(arr, n))
| [
197,
198,
2,
12434,
2438,
198,
77,
28,
1831,
62,
15414,
10786,
17469,
2546,
1058,
705,
8,
198,
3258,
28,
21737,
198,
1640,
1312,
287,
2837,
7,
15,
11,
77,
2599,
198,
220,
5240,
13,
33295,
7,
15414,
10786,
9255,
5002,
6,
4008,
198,... | 2.462963 | 54 |
# -*- coding: utf-8 -*-
"""
Tests the main functionalities of the tree_models module.
"""
import logging
import couchdb
from django.utils.unittest.case import TestCase
from bilanci import tree_models
from bilanci.models import Voce
__author__ = 'guglielmo'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
51,
3558,
262,
1388,
10345,
871,
286,
262,
5509,
62,
27530,
8265,
13,
198,
37811,
198,
198,
11748,
18931,
198,
11748,
18507,
9945,
198,
6738,
42625,
14208,
1... | 2.977011 | 87 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import httplib
# causes httplib to return the partial response from a server in case the read fails to be complete.
httplib.HTTPResponse.read = patch_http_response_read(httplib.HTTPResponse.read) | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
1841,
489,
571,
198,
198,
2,
5640,
1841,
489,
571,
284,
1441,
262,
13027,
2882,
422,
257,
4382,
287,
1339,
262,
... | 2.845238 | 84 |
# Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
import numpy as np
from inferlo import PairWiseFiniteModel
from inferlo.testing import tree_potts_model, line_potts_model
from inferlo.testing.test_utils import check_samples
| [
2,
15069,
357,
66,
8,
12131,
11,
383,
554,
2232,
21982,
7035,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
532,
766,
38559,
24290,
2393,
13,
198,
11748,
299,
32152,
355,
45941,
198,
198,
... | 3.450549 | 91 |
n = int(input('Digite um número inteiro: '))
print(f'O antecessor de {n} é \033[7;30m{n-1}\033[m e o sucessor é \033[30m{n+1}\033[m')
| [
77,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
299,
21356,
647,
78,
493,
68,
7058,
25,
705,
4008,
198,
4798,
7,
69,
6,
46,
29692,
919,
273,
390,
1391,
77,
92,
38251,
3467,
44427,
58,
22,
26,
1270,
76,
90,
77,
12,
16,
32239,
... | 1.942029 | 69 |
# coding=utf-8
if __name__ == '__main__':
s = Solution()
# s.parse_and_print()
print s.find_position([1,2,3,4], 3)
| [
2,
19617,
28,
40477,
12,
23,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
264,
796,
28186,
3419,
198,
220,
220,
220,
1303,
264,
13,
29572,
62,
392,
62,
4798,
3419,
198,
220,
220,
220... | 2.096774 | 62 |
import math
a = int(input())
b = int(input())
g = math.gcd(a, b)
a //= g
b //= g
if a%b:
if a//b:
print(a//b, end=' ')
print(f'{a%b}/{b}')
else:
print(a//b) | [
11748,
10688,
201,
198,
64,
796,
493,
7,
15414,
28955,
201,
198,
65,
796,
493,
7,
15414,
28955,
201,
198,
70,
796,
10688,
13,
70,
10210,
7,
64,
11,
275,
8,
201,
198,
64,
3373,
28,
308,
201,
198,
65,
3373,
28,
308,
201,
198,
20... | 1.673077 | 104 |
#!/home/mostafa_karimi/anaconda2/bin/python
# -*- coding: utf-8 -*-
import pandas as pn
import numpy as np
from sklearn import preprocessing
from scipy.stats.stats import pearsonr
print("kernel methylation islands completed")
Kernel()
| [
198,
2,
48443,
11195,
14,
1712,
28485,
62,
21070,
25236,
14,
272,
330,
13533,
17,
14,
8800,
14,
29412,
220,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
19798,
292,
355,
279,
77,
198,
11748,
299,... | 2.880952 | 84 |
"""
Tests the packet encoder
Created on Jul 10, 2020
@author: Joseph Paetz, hpaulson
"""
from __future__ import absolute_import
from fprime_gds.common.encoders.pkt_encoder import PktEncoder
from fprime_gds.common.data_types.pkt_data import PktData
from fprime_gds.common.utils.config_manager import ConfigManager
from fprime_gds.common.templates.ch_template import ChTemplate
from fprime_gds.common.templates.pkt_template import PktTemplate
from fprime_gds.common.data_types.ch_data import ChData
from fprime.common.models.serialize.time_type import TimeType
from fprime.common.models.serialize.u8_type import U8Type
from fprime.common.models.serialize.u16_type import U16Type
from fprime.common.models.serialize.u32_type import U32Type
def test_pkt_encoder():
"""
Tests the encoding of the packet encoder
"""
config = ConfigManager()
config.set("types", "msg_len", "U16")
enc = PktEncoder()
enc_config = PktEncoder(config)
ch_temp_1 = ChTemplate(101, "test_ch", "test_comp", U32Type())
ch_temp_2 = ChTemplate(102, "test_ch2", "test_comp2", U8Type())
ch_temp_3 = ChTemplate(103, "test_ch3", "test_comp3", U16Type())
pkt_temp = PktTemplate(64, "test_pkt", [ch_temp_1, ch_temp_2, ch_temp_3])
time_obj = TimeType(2, 0, 1533758629, 123456)
ch_obj_1 = ChData(U32Type(1356), time_obj, ch_temp_1)
ch_obj_2 = ChData(U8Type(143), time_obj, ch_temp_2)
ch_obj_3 = ChData(U16Type(1509), time_obj, ch_temp_3)
pkt_obj = PktData([ch_obj_1, ch_obj_2, ch_obj_3], time_obj, pkt_temp)
desc_bin = b"\x00\x00\x00\x04"
id_bin = b"\x00\x40"
time_bin = b"\x00\x02\x00\x5b\x6b\x4c\xa5\x00\x01\xe2\x40"
ch_bin = b"\x00\x00\x05\x4c\x8F\x05\xe5"
long_len_bin = b"\x00\x00\x00\x18"
short_len_bin = b"\x00\x18"
reg_expected = long_len_bin + desc_bin + id_bin + time_bin + ch_bin
config_expected = short_len_bin + desc_bin + id_bin + time_bin + ch_bin
reg_output = enc.encode_api(pkt_obj)
assert reg_output == reg_expected, (
"FAIL: expected regular output to be %s, but found %s"
% (list(reg_expected), list(reg_output))
)
config_output = enc_config.encode_api(pkt_obj)
assert config_output == config_expected, (
"FAIL: expected configured output to be %s, but found %s"
% (list(config_expected), list(config_output))
)
| [
37811,
198,
51,
3558,
262,
19638,
2207,
12342,
198,
198,
41972,
319,
5979,
838,
11,
12131,
198,
31,
9800,
25,
7212,
11243,
23773,
11,
27673,
2518,
1559,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
628,
198,
6... | 2.334322 | 1,011 |
from rest_framework import serializers
from django.contrib.auth.models import Group, Permission
from .models import Accounts, Ticket, TicketType, TicketRecord
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
4912,
11,
2448,
3411,
198,
198,
6738,
764,
27530,
1330,
35584,
11,
24014,
11,
24014,
6030,
11,
24014,
23739,
628,
198
] | 4.05 | 40 |
from random import randint
import sys
file = ['edit', 'delicious', 'wiki', 'flickr', 'youtube', 'dblp']
vertexNum = [21504191,4535197,1870709,2302925,3223589,1103412]
file = ['edit']
for i in range(len(file)):
print file[i]
f = open(file[i]+'.earliest.query', 'w')
for j in range(int(sys.argv[1])):
line = str(6) + " " + str(randint(0, vertexNum[i]-1)) + '\n'
f.write(line)
f.close()
| [
6738,
4738,
1330,
43720,
600,
198,
11748,
25064,
198,
7753,
796,
37250,
19312,
3256,
705,
12381,
6243,
3256,
705,
15466,
3256,
705,
2704,
18994,
3256,
705,
11604,
3256,
705,
67,
2436,
79,
20520,
198,
332,
16886,
33111,
796,
685,
2481,
3... | 2.201058 | 189 |
#!/usr/bin/python3
"""Update DNS records."""
import logging
import os
import sys
from CloudFlare import CloudFlare
from CloudFlare.exceptions import (
CloudFlareError,
CloudFlareInternalError,
CloudFlareAPIError,
)
import requests
DEBUG = True
if DEBUG:
logging.getLogger().setLevel(logging.INFO)
# Helper functions
def get_public_ip() -> str:
"""Get our public IP address.
Using https://api.ipify.org/ as it has strong backing and guarantees stability.
It would be nice if I could use https://www.cloudflare.com/cdn-cgi/trace, but that
endpoint isn't necessarily stable.
"""
return requests.get("https://api.ipify.org/").text
def get_api(token: str) -> CloudFlare:
"""Get the Cloudflare API object and verify token."""
api = CloudFlare(token=token)
try:
api.user.tokens.verify.get()
except (
CloudFlareError,
CloudFlareInternalError,
CloudFlareAPIError,
):
logging.error("Failed to verify, token likely invalid")
raise
return api
def get_zone_id(api: CloudFlare, zone_name: str) -> str:
"""Get a zone id from a zone name."""
# Get matching zones
zones = api.zones.get()
matching_zones = [zone for zone in zones if zone["name"] == zone_name]
# Ensure we got exactly one
if not matching_zones:
zone_names = [zone["name"] for zone in zones]
logging.error(f"No matching zone for {zone_name} in {zone_names}")
raise Exception("Failed to find zone")
if len(matching_zones) > 1:
logging.error(f"Found multiple matching zones for {zone_name} in {zones}")
raise Exception("Found multiple zones")
# Return
return matching_zones[0]["id"]
# Main function
def update_ddns(token: str, zone_name: str, dns_name: str):
"""Update Cloudflare DNS.
args:
token: Your Cloudflare Api Token
zone_name: The name of the target zone, e.g. 'example.com'
dns_name: The full url we are DDNSing to us, e.g. 'api.example.com'
"""
# Get our public ip
public_ip = get_public_ip()
logging.info(f"Got public ip: {public_ip}")
# Get api
api = get_api(token=token)
logging.info("Api verified")
# Get zone id
zone_id = get_zone_id(api=api, zone_name=zone_name)
logging.info(f"Found zone_name {zone_name}: {zone_id}")
# Get zone dns records and clear old ones
valid_exists = False
query = {
"name": dns_name,
"type": "A",
}
zone_dns_records = api.zones.dns_records.get(zone_id, params=query)
for dns_record in zone_dns_records:
if dns_record["content"] != public_ip or valid_exists:
logging.info(f"Deleting unwanted record: {dns_record['id']}")
api.zones.dns_records.delete(zone_id, dns_record["id"])
else:
logging.info(f"Found matching record: {dns_record['id']}")
valid_exists = True
# Add a new record if necessary
if not valid_exists:
logging.info("No valid record, creating new one")
dns_record_data = {
"name": dns_name,
"type": "A",
"content": public_ip,
"TTL": 300,
}
dns_record = api.zones.dns_records.post(zone_id, data=dns_record_data)
logging.info(f"Created new record: {dns_record['id']}")
else:
logging.info("Matching record exists, no need for new one")
logging.info("Complete")
# Run if we are directly called
if __name__ == "__main__":
"""Run the main function from either cli args or env vars, or fail if neither."""
if len(sys.argv) == 4:
# CLI args
token = sys.argv[1]
zone_name = sys.argv[2]
dns_name = sys.argv[3]
elif len(sys.argv) == 1:
# Env vars
token = os.environ["CLOUDFLARE_TOKEN"]
zone_name = os.environ["CLOUDFLARE_ZONE_NAME"]
dns_name = os.environ["CLOUDFLARE_DNS_NAME"]
else:
raise Exception("Supply either all CLI args or all env vars")
update_ddns(token=token, zone_name=zone_name, dns_name=dns_name)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
37811,
10260,
18538,
4406,
526,
15931,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
10130,
7414,
533,
1330,
10130,
7414,
533,
198,
6738,
10130,
7414,
533,
1... | 2.367264 | 1,729 |
from .imagenet import imagenet_iterator
from .imagenet import multiple_imagenet_iterator
from .cifar10 import cifar10_iterator
from .cifar10 import cifar100_iterator
# from dali_imagenet import get_dali_iter
| [
6738,
764,
320,
11286,
316,
1330,
3590,
268,
316,
62,
48727,
198,
6738,
764,
320,
11286,
316,
1330,
3294,
62,
320,
11286,
316,
62,
48727,
198,
6738,
764,
66,
361,
283,
940,
1330,
269,
361,
283,
940,
62,
48727,
198,
6738,
764,
66,
... | 2.971429 | 70 |
import os
from settings import job_directory
| [
11748,
28686,
198,
198,
6738,
6460,
1330,
1693,
62,
34945,
628
] | 4.272727 | 11 |
#-*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.antenna
This module handles antennas
An antenna can be loaded from various file formats among
+ .vsh2
+ .vsh3
+ .sh2
+ .sh3
+ .mat
+ .trx
Antenna derives from Pattern
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.antprop.antenna import *
>>> A = Antenna()
>>> fig,ax = A.plotG(fGHz=[2,3,4],plan='theta',angdeg=0)
Pattern Class
-------------
.. autosummary::
:toctree: generated/
Pattern.eval
Pattern.gain
Pattern.radF
Pattern Functions
=================
Pattern.__pOmni
Pattern.__pGauss
Pattern.__p3gpp
Pattern.__p3gpp
Pattern from SH coeff
=====================
Pattern.__pvsh3
Pattern.__psh3
Antenna Class
-------------
.. autosummary::
:toctree: generated/
Utility Functions
=================
.. autosummary::
:toctree: generated/
Antenna.__init__
Antenna.__repr__
Antenna.ls
Antenna.errel
Antenna.checkpole
Antenna.info
Antenna.pol2cart
Antenna.cart2pol
Antenna.minsh3
Antenna.mse
Antenna.getdelay
Antenna.elec_delay
Synthesis Functions
===================
.. autosummary::
:toctree: generated/
Antenna.Fsynth
Antenna.Fsynth1
Antenna.Fsynth2s
Antenna.Fsynth2b
Antenna.Fsynth2
Antenna.Fsynth3
Visualization functions
=======================
.. autosummary::
:toctree: generated/
Antenna.pattern
Antenna.plotG
Antenna._show3
Antenna.show3
Antenna.plot3d
Antenna.pol3d
Antenna.load_trx
Antenna.movie_vsh
Loading and Saving
==================
.. autosummary::
:toctree: generated/
Antenna.loadhfss
Antenna.loadtrx
Antenna.loadmat
Antenna.savevsh3
Antenna.savesh2
Antenna.savesh3
Antenna.loadvsh3
Antenna.loadsh3
Antenna.savevsh2
Antenna.loadsh2
Antenna.loadvsh2
Miscellaneous functions
========================
.. autosummary::
:toctree: generated/
forcesympol
compdiag
show3D
"""
#from __future__ import print_function
import doctest
import os
import glob
import re
import pdb
import sys
if sys.version_info.major==2:
import PIL.Image as Image
try:
import mayavi.mlab as mlab
except:
pass
else:
import image
import numpy as np
import scipy.linalg as la
from scipy import io
import pylayers.util.pyutil as pyu
import pylayers.util.geomutil as geu
from pylayers.util.project import *
from pylayers.antprop.spharm import *
try:
from pylayers.antprop.antvsh import vsh
except:
pass
from pylayers.antprop.antssh import ssh,SSHFunc2, SSHFunc, SSHCoeff, CartToSphere
from pylayers.antprop.coeffModel import *
from matplotlib import rc
from matplotlib import cm # colormaps
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MaxNLocator
from scipy.special import sici , fresnel
import pandas as pd
import matplotlib.pylab as plt
class Pattern(PyLayers):
""" Class Pattern
MetaClass of Antenna
A pattern is evaluated with the 3 np.array parameters
theta
phi
fGHz
This class implements pattern methods.
The name of a pattern method starts by p.
Each pattern method has a unique dictionnary argument 'param'
If self.grid dimensions are
Nt x Np x Nf
else:
Ndir x Nf
"""
def eval(self,**kwargs):
""" evaluate pattern functions
Parameters
----------
th: list
[]
ph: list
[]
pt : np.array (3,N)
pr : np.array (3,N)
azoffset : int (0)
Rfloor:bool
if true add gain value to reflected ray on the floor.
values are append at the end of sqG.
fGHz:list
[]
nth: int
90
nph: int
181
first: boolean
True if first call (to define self.param)
grid: boolean
True for pattern mode, False for Ray Tracing mode
th0 : float
theta initial value
th1 : float
theta finale value
ph0 : float
phi initial value
ph1 : float
phi final value
Examples
--------
>>> from pylayers.antprop.aarray import *
>>> A0=Antenna('Omni',param={'pol':'t','GmaxdB':0})
>>> A1=Antenna('Gauss')
>>> A2=Antenna('3gpp')
>>> A3=ULArray()
>>> A0.eval()
>>> A1.eval()
>>> A2.eval()
>>> #A3.eval()
"""
defaults = {'Rfloor':False,
'nth':90,
'nph':181,
'grid':True,
'th0':0,
'th1':np.pi,
'ph0':0,
'ph1':2*np.pi,
'azoffset':0,
'inplace':True
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
if 'fGHz' not in kwargs:
if 'fGHz' not in self.__dict__:
self.fGHz = np.array([2.4])
else:
if type(kwargs['fGHz'])==np.ndarray:
self.fGHz = kwargs['fGHz']
else:
self.fGHz = np.array([kwargs['fGHz']])
self.nf = len(self.fGHz)
self.grid = kwargs['grid']
#
# if th and ph are empty
# if pt and pr are empty
# calculates from th0,th1,nth
# ph0,phi,nph
# else
# calculates from points coordinates pt and pr
# else
# take specified values
if ('th' not in kwargs) and ('ph' not in kwargs):
if ('pt' not in kwargs) and ('pr' not in kwargs):
self.theta = np.linspace(kwargs['th0'],kwargs['th1'],kwargs['nth'])
self.phi = np.linspace(kwargs['ph0'],kwargs['ph1'],kwargs['nph'],endpoint=False)
self.grid = True
self.full_evaluated = True
else:
si = kwargs['pr']-kwargs['pt']
ssi = np.sqrt(np.sum(si*si,axis=0))
sn = si/ssi[None,:]
self.theta = np.arccos(sn[2,:])
self.phi = np.mod(np.arctan2(sn[1,:],sn[0,:])+kwargs['azoffset'],2*np.pi)
self.grid = False
self.full_evaluated = True
if kwargs['Rfloor']:
dR = np.sqrt(ssi**2 + (kwargs['pr'][2,:] + kwargs['pt'][2,:])**2) # reflexion length
thetaR = np.arccos((kwargs['pr'][2,:] + kwargs['pt'][2,:]) / dR)
self.theta = np.hstack([self.theta,thetaR])
self.phi = np.hstack([self.phi,self.phi])
else :
assert(len(kwargs['th'])==len(kwargs['ph']))
self.theta = kwargs['th']
self.phi = kwargs['ph']
self.full_evaluated = False
if self.typ=='azel':
self.theta=np.linspace(-np.pi,np.pi,360)
self.phi=np.linspace(-np.pi,np.pi,360)
self.full_evaluated = False
self.nth = len(self.theta)
self.nph = len(self.phi)
#
# evaluation of the specific Pattern__p function
#
Ft,Fp = eval('self._Pattern__p'+self.typ)(param=self.param)
if kwargs['inplace']:
self.Ft = Ft
self.Fp = Fp
self.evaluated = True
self.gain()
else:
return Ft,Fp
def __pOmni(self,**kwargs):
""" omnidirectional pattern
Parameters
----------
param : dict
dictionnary of parameters
+ pol : string
't'| 'p'
+ GmaxdB : float
0
self.grid is used for switching between :
if True angular grid : nth x nph x nf
if False direction : ndir x nf
"""
defaults = { 'param' : { 'pol' : 't', 'GmaxdB': 0 } }
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
self.GmaxdB = self.param['GmaxdB']
self.pol = self.param['pol']
G = pow(10.,self.GmaxdB/10.) # linear gain
if self.grid:
# Nth x Nphx Nf
self.sqG = np.array(np.sqrt(G))*np.ones(len(self.fGHz))[None,None,:]
self.evaluated = True
else:
# Nd x Nf
self.sqG = np.array(np.sqrt(G))*np.ones(len(self.fGHz))[None,:]
Ft,Fp = self.radF()
return Ft,Fp
def __paperture(self,**kwargs):
""" Aperture Pattern
Aperture in the (x,y) plane. Main lobe in theta=0 direction
polar indicates the orientation of the Electric field either 'x' or 'y'
See theoretical background in :
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
Parameters
----------
HPBW_x_deg : float
Half Power Beamwidth (degrees)
HPBW_y_deg : float
Half Power Beamwidth (degrees)
"""
defaults = {'param': {'HPBW_x_deg':40,
'HPBW_y_deg':10,
'Gfactor':27000,
'fcGHz': 27.5,
'polar':'x',
'window':'rect'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
deg_to_rad = np.pi/180.
ld_c = 0.3/self.param['fcGHz']
ld = 0.3/self.fGHz
Dx = 0.886*ld_c/(self.param['HPBW_x_deg']*deg_to_rad)
Dy = 0.886*ld_c/(self.param['HPBW_y_deg']*deg_to_rad)
Dx_n = Dx/ld
Dy_n = Dy/ld
if self.grid:
# Nth x Nph x Nf
theta = self.theta[:,None,None]
phi = self.phi[None,:,None]
else:
# Ndir x Nf
theta = self.theta[:,None]
phi = self.phi[:,None]
vx = Dx_n[...,:]*np.sin(theta)*np.cos(phi) # 18.1.4
vy = Dy_n[...,:]*np.sin(theta)*np.sin(phi) # 18.1.4
F_nor = ((1+np.cos(theta))/2.)*np.abs(np.sinc(vx)*np.sinc(vy))
HPBW_x = (0.886*ld/Dx)/deg_to_rad
HPBW_y = (0.886*ld/Dy)/deg_to_rad
Gmax = self.param['Gfactor']/(HPBW_x*HPBW_y)
F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
# Handling repartition on both vector components
# enforce E.y = 0
if self.param['polar']=='x':
Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
if self.param['polar']=='y':
Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
#
# This is experimental
# How to apply the 2D windowing properly ?
#
# if self.param['window']!='rect':
# Nt = self.Fp.shape[0]
# Np = self.Fp.shape[1]
# Wp = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wt = np.fft.ifftshift(np.ones(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# Wu = np.fft.ifftshift(np.ones(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wi = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# W = np.fft.fftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# # Fp : t x p x f ou r x f
# # Ft : t x p x f ou r x f
#
# Kp = np.fft.ifft2(self.Fp,axes=(0,1))
# Kt = np.fft.ifft2(self.Ft,axes=(0,1))
#
# self.Fp = np.fft.fft2(Kp*Wt,axes=(0,1))
# self.Ft = np.fft.fft2(Kt*Wp,axes=(0,1))
return Ft,Fp
def __paperture2(self,**kwargs):
""" Aperture Pattern
Aperture in the (x,y) plane. Main lobe in theta=0 direction
polar indicates the orientation of the Electric field either 'x' or 'y'
See theoretical background in :
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
Parameters
----------
HPBW_x_deg : float
Half Power Beamwidth (degrees)
HPBW_y_deg : float
Half Power Beamwidth (degrees)
"""
defaults = {'param': {'HPBW_a_deg':40,
'HPBW_b_deg':10,
'Gfactor':27000,
'fcGHz': 27.5,
'polar':'x',
'window':'rect'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
deg_to_rad = np.pi/180.
ld_c = 0.3/self.param['fcGHz']
ld = 0.3/self.fGHz
a = 1.189*ld_c/(self.param['HPBW_a_deg']*deg_to_rad)
b = 0.886*ld_c/(self.param['HPBW_b_deg']*deg_to_rad)
a_n = a/ld
b_n = b/ld
if self.grid:
# Nth x Nph x Nf
theta = self.theta[:,None,None]
phi = self.phi[None,:,None]
else:
# Ndir x Nf
theta = self.theta[:,None]
phi = self.phi[:,None]
vx = a_n[...,:]*np.sin(theta)*np.cos(phi) # 18.1.4
vy = b_n[...,:]*np.sin(theta)*np.sin(phi) # 18.1.4
#F_nor = ((1+np.cos(theta))/2.)*np.abs(np.sinc(vx)*np.sinc(vy))
F_nor = (1+np.cos(theta))/2*(np.cos(np.pi*vx)/(1-4*vx**2))*np.sinc(vy) # 18.1.3 + suppression rear radiation
HPBW_a = (1.189*ld/a)/deg_to_rad
HPBW_b = (0.886*ld/b)/deg_to_rad
Gmax = self.param['Gfactor']/(HPBW_a*HPBW_b)
F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
# Handling repartition on both vector components
# enforce E.y = 0
if self.param['polar']=='x':
Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
if self.param['polar']=='y':
Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
#
# This is experimeintal
# How to apply the 2D windowing properly ?
#
# if self.param['window']!='rect':
# Nt = self.Fp.shape[0]
# Np = self.Fp.shape[1]
# Wp = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wt = np.fft.ifftshift(np.ones(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# Wu = np.fft.ifftshift(np.ones(Nt)[:,None]*np.ones(Np)[None,:])[:,:,None]
# Wi = np.fft.ifftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# W = np.fft.fftshift(np.hamming(Nt)[:,None]*np.hamming(Np)[None,:])[:,:,None]
# # Fp : t x p x f ou r x f
# # Ft : t x p x f ou r x f
#
# Kp = np.fft.ifft2(self.Fp,axes=(0,1))
# Kt = np.fft.ifft2(self.Ft,axes=(0,1))
#
# self.Fp = np.fft.fft2(Kp*Wt,axes=(0,1))
# self.Ft = np.fft.fft2(Kt*Wp,axes=(0,1))
return Ft,Fp
def __phplanesectoralhorn(self,**kwargs):
""" H plane sectoral horn
Parameters
----------
rho1 : float
sector radius (meter)
a1 : float
aperture dimension along x (greatest value in meters)
b1 : float
aperture dimension along y (greatest value in meters)
Notes
-----
Maximum gain in theta =0
Polarized along y axis (Jx=0,Jz=0)
"""
defaults = {'param': {'rho1':0.198,
'a1':0.088, # aperture dimension along x
'b1':0.0126, # aperture dimension along y
'fcGHz':28,
'GcmaxdB':19,
'Nx':20,
'Ny':20}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
#H-plane antenna
rho1 = self.param['rho1']
a1 = self.param['a1']
b1 = self.param['b1']
Nx = self.param['Nx']
Ny = self.param['Ny']
fcGHz = self.param['fcGHz']
GcmaxdB = self.param['GcmaxdB']
assert(a1>b1), "a1 should be greater than b1 (see fig 13.1O(a) Balanis"
lbda = 0.3/self.fGHz
k = 2*np.pi/lbda
eta0 = np.sqrt(4*np.pi*1e-7/8.85429e-12)
if self.grid:
# X,Y aperture points (t,p,x,y,f)
X = np.arange(-a1/2,a1/2,a1/(Nx-1))[None,None,:,None,None]
Y = np.arange(-b1/2,b1/2,b1/(Ny-1))[None,None,None,:,None]
# angular domain (theta,phi)
Theta= self.theta[:,None,None,None,None]
Phi = self.phi[None,:,None,None,None]
else:
# X,Y aperture points (r,x,y,f)
X = np.arange(-a1/2,a1/2,a1/(Nx-1))[None,:,None,None]
Y = np.arange(-b1/2,b1/2,b1/(Ny-1))[None,None,:,None]
# angular domain (theta,phi)
Theta= self.theta[:,None,None,None]
Phi= self.phi[:,None,None,None]
#% Aperture field Ea:
# Ea is an approximation of the aperture field:
# (from: C. A. Balanis, Antenna Theoy: Analysis and Design. New York
# Wiley, 1982. ... Section 13.3.1 )
Ea = np.cos(X*np.pi/a1)*np.exp(-.5*1j*k*((X**2)/(rho1)+(Y**2)/(rho1)))
Jy = -Ea/eta0
Mx = Ea
# cosine direction
ctsp = np.cos(Theta)*np.sin(Phi)
cp = np.cos(Phi)
ctcp = np.cos(Theta)*np.cos(Phi)
sp = np.sin(Phi)
stcp = np.sin(Theta)*np.cos(Phi)
stsp = np.sin(Theta)*np.sin(Phi)
# N & L
ejkrrp = np.exp(1j*k*( X*stcp + Y*stsp)) # exp(jk (r.r'))
if self.grid:
N_theta = np.einsum('tpnmf->tpf',Jy*ctsp*ejkrrp) # 12-12 a assuming Jx,Jz=0
N_phi = np.einsum('tpnmf->tpf',Jy*cp*ejkrrp) # 12-12 b ""
L_theta = np.einsum('tpnmf->tpf',Mx*ctcp*ejkrrp) # 12-12 c assuming My,Mz=0
L_phi = np.einsum('tpnmf->tpf',-Mx*sp*ejkrrp) # 12-12 d ""
else:
N_theta = np.einsum('rnmf->rf',Jy*ctsp*ejkrrp) # 12-12 a assuming Jx,Jz=0
N_phi = np.einsum('rnmf->rf',Jy*cp*ejkrrp) # 12-12 b ""
L_theta = np.einsum('rnmf->rf',Mx*ctcp*ejkrrp) # 12-12 c assuming My,Mz=0
L_phi = np.einsum('rnmf->rf',-Mx*sp*ejkrrp) # 12-12 d ""
# Far-Field
Ft = -L_phi - eta0*N_theta # 12-10b p 661
Fp = L_theta - eta0*N_phi # 12-10c p 661
G = Ft*np.conj(Ft)+Fp*np.conj(Fp)
if self.grid:
# Umax : ,f
self.Umax = G.max(axis=(0,1))
Ft = Ft/np.sqrt(self.Umax[None,None,:])
Fp = Fp/np.sqrt(self.Umax[None,None,:])
# centered frequency range
fcc = np.abs(self.fGHz-fcGHz)
idxc = np.where(fcc==np.min(fcc))[0][0]
# Gain @ center frequency
#G = _gain(Ft[:,:,idxc],Fp[:,:,idxc])
G = _gain(Ft,Fp)
# effective half power beamwidth
self.ehpbw, self.hpster = _hpbw(G,self.theta,self.phi)
self.Gfactor = 10**(GcmaxdB/10.)*self.ehpbw[idxc]
Gmax = self.Gfactor/self.ehpbw
Ft = np.sqrt(Gmax[None,None,:])*Ft
Fp = np.sqrt(Gmax[None,None,:])*Fp
else:
##
## Ft (r x f )
## Fp (r x f )
##
Ft = Ft/np.sqrt(self.Umax[None,:])
Fp = Fp/np.sqrt(self.Umax[None,:])
Gmax = self.Gfactor/self.ehpbw
Ft = np.sqrt(Gmax[None,:])*Ft
Fp = np.sqrt(Gmax[None,:])*Fp
return Ft,Fp
def __phorn(self,**kwargs):
""" Horn antenna
http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf (18.2)
Parameters
----------
Half Power Beamwidth (degrees)
"""
defaults = {'param': {'sigma_a':1.2593,
'sigma_b':1.0246,
'A_wl':16,
'B_wl':3,
'fcGHz':28.,
'polar':'x'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.param = kwargs['param']
deg_to_rad = np.pi/180.
ld_c = 0.3/self.param['fcGHz']
ld = 0.3/self.fGHz
A_wl = kwargs['param']['A_wl']
B_wl = kwargs['param']['B_wl']
A = A_wl*ld_c
B = B_wl*ld_c
sigma_a = kwargs['param']['sigma_a']
sigma_b = kwargs['param']['sigma_b']
#b = kwargs['param']['b']
#Ra = (A/(A-a))*RA
#Rb = (B/(B-b))*RB
#La = np.sqrt(Ra**2+A**2/4)
#Lb = np.sqrt(Rb**2+B**2/4)
#alpha = np.arctan(A/(2*Ra))
#beta = np.arctan(B/(2*Rb))
#Delta_a = A**2/(8*Ra)
#Delta_b = B**2/(8*Rb)
#sigma_a = A/np.sqrt((2*ld*Ra))
#sigma_b = B/np.sqrt((2*ld*Rb))
A_n = A/ld
B_n = B/ld
if self.grid:
# Nth x Nph x Nf
theta = self.theta[:,None,None]
phi = self.phi[None,:,None]
else:
# Ndir x Nf
theta = self.theta[:,None]
phi = self.phi[:,None]
vx = A_n[...,:]*np.sin(theta)*np.cos(phi) # 18.3.4
vy = B_n[...,:]*np.sin(theta)*np.sin(phi) # 18.3.4
F = ((1+np.cos(theta))/2.)*(F1(vx,sigma_a)*F0(vy,sigma_b))
normF = np.abs(F1(0,sigma_a)*F0(0,sigma_b))**2
F_nor = F/np.sqrt(normF)
efficiency = 0.125*normF # 18.4.3
Gmax = efficiency*4*np.pi*A*B/ld**2
F = np.sqrt(Gmax[...,:])*F_nor # Ndir x Nf
# Handling repatition on both vector components
# enforce E.y = 0
if self.param['polar']=='x':
Ft = F/np.sqrt(1+(np.cos(theta)*np.sin(phi)/np.cos(phi))**2)
Fp = (-np.cos(theta)*np.sin(phi)/np.cos(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
# enforce E.x = 0
if self.param['polar']=='y':
Ft = F/np.sqrt(1+(np.cos(theta)*np.cos(phi)/np.sin(phi))**2)
Fp = (np.cos(theta)*np.cos(phi)/np.sin(phi))*Ft
nan_bool = np.isnan(Fp)
Fp[nan_bool] = F[nan_bool]
return Ft,Fp
def __pazel(self,**kwargs):
""" Azimuth Elevation pattern from file
Parameters
----------
filename : ANT filename
"""
defaults = {'param': {'filename' : '',
'pol':'V'}}
f = open(kwargs['param']['filename'])
Gthetaphi = f.readlines()
f.close()
Gthetaphi = np.array(Gthetaphi).astype('float')
Gaz = Gthetaphi[360:]
Gel = Gthetaphi[:360]
sqGazlin = np.sqrt(pow(10,Gaz/10.))
sqGellin = np.sqrt(pow(10,Gel/10.))
if self.grid :
# Nth x Nph x Nf
if kwargs['param']['pol']=='V':
Ft = np.ones((360,360,1))
Fp = np.zeros((360,360,1))
#Ft[180,:] = sqGazlin[:,None]
#Ft[:,180] = sqGellin[:,None]
Ft = sqGazlin[None,:,None]*sqGellin[:,None,None]
if kwargs['param']['pol']=='H':
Fp = np.ones((360,360,1))
Ft = np.zeros((360,360,1))
Fp = sqGazlin[None,:,None]*sqGellin[:,None,None]
#self.Fp[180,:]= sqGazlin[:,None]
#self.Fp[:,180]= sqGellin[:,None]
if kwargs['param']['pol']=='45':
Fp = np.ones((360,360,1))
Ft = np.ones((360,360,1))
# Azimuth
Ft = (1/sqrt(2))*sqGazlin[None,:,None]*sqGellin[:,None,None]
Fp = (1/sqrt(2))*sqGazlin[None,:,None]*sqGellin[:,None,None]
#self.Fp[180,:]= sqGazlin[:,None]
#self.Fp[180,:]= (1/sqrt(2))*sqGazlin[:,None]
#Ft[180,:]= (1/sqrt(2))*sqGazlin[:,None]
# Elevation
#self.Fp[:,180]= (1/sqrt(2))*sqGellin[:,None]
#Ft[:,180]= (1/sqrt(2))*sqGellin[:,None]
#Ft = sqGthlin[:,None,None]
#self.Fp = sqGphlin[None,:,None]
# Ft = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
# self.Fp = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
self.evaluated = True
else:
pass
# #
# # Nd x Nf
# #
# Ft = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
# Fp = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
# # add frequency axis (Ndir x Nf)
# Ft = np.dot(Ft[:,None],np.ones(len(self.fGHz))[None,:])
# self.Fp = np.dot(Fp[:,None],np.ones(len(self.fGHz))[None,:])
return Ft,Fp
def __pGauss(self,**kwargs):
""" Gauss pattern
Parameters
----------
p0 : phi main lobe (0-2pi)
p3 : 3dB aperture angle
t0 : theta main lobe (0-pi)
t3 : 3dB aperture angle
TODO : finish implementation of polar
"""
defaults = {'param':{'p0' : 0,
't0' : np.pi/2,
'p3' : np.pi/6,
't3' : np.pi/6,
'pol':'th'
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.typ='Gauss'
self.param = kwargs['param']
p0 = self.param['p0']
t0 = self.param['t0']
p3 = self.param['p3']
t3 = self.param['t3']
pol = self.param['pol']
self.Gmax = 16/(t3*p3)
self.GdB = 10*np.log10(self.Gmax)
self.sqGmax = np.sqrt(self.Gmax)
argth = ((self.theta-t0)**2)/t3
e1 = np.mod(self.phi-p0,2*np.pi)
e2 = np.mod(p0-self.phi,2*np.pi)
e = np.array(map(lambda x: min(x[0],x[1]),zip(e1,e2)))
argphi = (e**2)/p3
Nf = len(self.fGHz)
if self.grid :
Nt = len(self.theta)
Np = len(self.phi)
# Nth x Nph x Nf
# Ft = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
# self.Fp = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) )
if pol=='th':
Ft = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) *np.ones(len(self.fGHz))[None,None,:])
Fp = np.zeros((Nt,Np,Nf))
if pol=='ph':
Ft = np.zeros((Nt,Np,Nf))
Fp = self.sqGmax * ( np.exp(-2.76*argth[:,None,None]) * np.exp(-2.76*argphi[None,:,None]) *np.ones(len(self.fGHz))[None,None,:])
else:
#
# Nd x Nf
#
Nd = len(self.theta)
assert(len(self.phi)==Nd)
if pol=='th':
Ft = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
Fp = np.zeros(Nd)
if pol=='ph':
Ft = np.zeros(Nd)
Fp = self.sqGmax * ( np.exp(-2.76*argth) * np.exp(-2.76*argphi) )
# add frequency axis (Ndir x Nf)
Ft = np.dot(Ft[:,None],np.ones(len(self.fGHz))[None,:])
Fp = np.dot(Fp[:,None],np.ones(len(self.fGHz))[None,:])
return Ft,Fp
def __p3gpp(self,**kwargs):
""" 3GPP pattern
Parameters
----------
thtilt : theta tilt antenna
hpbwv : half power beamwidth v
hpbwh : half power beamwidth h
sllv : side lobe level
fbrh : front back ratio
gm :
pol : h | v | c
if pattern
Ft nth x nphi x nf
Fp nth x nphi x nf
else
Ft ndir x nf (==nth, ==nph)
Fp ndir x nf (==nth, ==nph)
"""
defaults = {'param' : {'thtilt':0, # antenna tilt
'hpbwv' :6.2,# half power beamwidth v
'hpbwh' :65, # half power beamwidth h
'sllv': -18, # side lobe level
'fbrh': 30, # front back ratio
'gm': 18, #
'pol':'p' #t , p , c
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param'] = defaults['param']
#if 'param' not in kwargs:
#kwargs['param']=defaults['param']
self.typ = "3gpp"
self.param = kwargs['param']
thtilt = self.param['thtilt']
hpbwh = self.param['hpbwh']
hpbwv = self.param['hpbwv']
sllv = self.param['sllv']
fbrh = self.param['fbrh']
gm = self.param['gm']
pol = self.param['pol']
self.pol = pol
# convert radian to degree
phi = self.phi*180/np.pi-180
theta = self.theta*180/np.pi-90
if self.grid:
#Nth x Nph x Nf
GvdB = np.maximum(-12*((theta-thtilt)/hpbwv)**2,sllv)[:,None,None]
GhdB = (-np.minimum(12*(phi/hpbwh)**2,fbrh)+gm)[None,:,None]
GdB = GhdB+GvdB
self.sqG = np.sqrt(10**(GdB/10.))*np.ones(self.nf)[None,None,:]
self.evaluated = True
else:
#Nd x Nf
GvdB = np.maximum(-12*((theta-thtilt)/hpbwv)**2,sllv)[:,None]
GhdB = (-np.minimum(12*(phi/hpbwh)**2,fbrh)+gm)[:,None]
GdB = GhdB+GvdB
self.sqG = np.sqrt(10**(GdB/10.))
# radiating functions are deduced from square root of gain
Ft,Fp = self.radF()
return Ft,Fp
def __pvsh1(self,**kwargs):
""" calculate pattern from VSH Coeffs (shape 1)
Parameters
----------
theta : ndarray (1xNdir)
phi : ndarray (1xNdir)
k : int
frequency index
Returns
-------
Ft , Fp
"""
assert hasattr(self,'C'),'no spherical coefficient'
assert hasattr(self.C.Br,'s1'),'no shape 1 coeff in vsh'
if self.grid:
theta = np.kron(self.theta, np.ones(self.nph))
phi = np.kron(np.ones(self.nth),self.phi)
else:
theta = self.theta
phi = self.phi
Nt = len(theta)
Np = len(phi)
if self.grid:
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt),phi)
nray = len(theta)
Br = self.C.Br.s1[:, :, :]
Bi = self.C.Bi.s1[:, :, :]
Cr = self.C.Cr.s1[:, :, :]
Ci = self.C.Ci.s1[:, :, :]
L = self.C.Br.L1
M = self.C.Br.M1
# The - sign is necessary to get the good reconstruction
# deduced from observation
# May be it comes from a different definition of theta in SPHEREPACK
ind = index_vsh(L, M)
l = ind[:, 0]
m = ind[:, 1]
#
V, W = VW(l, m, theta, phi)
#
# broadcasting along frequency axis
#
V = np.expand_dims(V,0)
W = np.expand_dims(V,0)
#
# k : frequency axis
# l : axis l (theta)
# m : axis m (phi)
#
Fth = np.eisum('klm,kilm->ki',Br,np.real(V.T)) - \
np.eisum('klm,kilm->ki',Bi,np.imag(V.T)) + \
np.eisum('klm,kilm->ki',Ci,np.real(W.T)) + \
np.eisum('klm,kilm->ki',Cr,np.imag(W.T))
Fph = -np.eisum('klm,kilm->ki',Cr,np.real(V.T)) + \
np.eisum('klm,kilm->ki',Ci,np.imag(V.T)) + \
np.eisum('klm,kilm->ki',Bi,np.real(W.T)) + \
np.eisum('klm,kilm->ki',Br,np.imag(W.T))
# here Nf x Nd
Ft = Fth.transpose()
Fp = Fph.transpose()
# then Nd x Nf
if self.grid:
# Nth x Nph x Nf
Ft = Ft.reshape(self.nth, self.nph,self.nf)
Fp = Fp.reshape(self.nth, self.nph,self.nf)
# last axis should be frequency
assert(Ft.shape[-1]==self.nf)
assert(Fp.shape[-1]==self.nf)
return Ft, Fp
def __pvsh3(self,**kwargs):
""" calculate pattern from vsh3
"""
assert hasattr(self,'C'),'no spherical coefficient'
assert hasattr(self.C.Br,'s3'),'no shape 3 coeff in vsh'
if self.grid:
theta = np.kron(self.theta, np.ones(self.nph))
phi = np.kron(np.ones(self.nth),self.phi)
else:
theta = self.theta
phi = self.phi
Br = self.C.Br.s3
lBr = self.C.Br.ind3[:, 0]
mBr = self.C.Br.ind3[:, 1]
Bi = self.C.Bi.s3
Cr = self.C.Cr.s3
Ci = self.C.Ci.s3
L = lBr.max()
M = mBr.max()
# vector spherical harmonics basis functions
# V, W = VW(lBr, mBr, theta, phi)
V, W = VW(lBr, mBr, theta, phi)
Fth = np.dot(Br, np.real(V.T)) - \
np.dot(Bi, np.imag(V.T)) + \
np.dot(Ci, np.real(W.T)) + \
np.dot(Cr, np.imag(W.T))
Fph = -np.dot(Cr, np.real(V.T)) + \
np.dot(Ci, np.imag(V.T)) + \
np.dot(Bi, np.real(W.T)) + \
np.dot(Br, np.imag(W.T))
# here Nf x Nd
Ft = Fth.transpose()
Fp = Fph.transpose()
# then Nd x Nf
if self.grid:
# Nth x Nph x Nf
Ft = Ft.reshape(self.nth, self.nph,self.nf)
Fp = Fp.reshape(self.nth, self.nph,self.nf)
# last axis should be frequency
assert(Ft.shape[-1]==self.nf)
assert(Fp.shape[-1]==self.nf)
return Ft,Fp
def __psh3(self,**kwargs):
""" calculate pattern for sh3
Parameters
----------
"""
assert hasattr(self,'S'),'no spherical coefficient'
assert hasattr(self.S.Cx,'s3'),'no shape 3 coeff in ssh'
if self.grid:
theta = np.kron(self.theta, np.ones(self.nph))
phi = np.kron(np.ones(self.nth),self.phi)
else:
theta = self.theta
phi = self.phi
cx = self.S.Cx.s3
cy = self.S.Cy.s3
cz = self.S.Cz.s3
lmax = self.S.Cx.lmax
Y ,indx = SSHFunc2(lmax, theta,phi)
k = self.S.Cx.k2
if self.grid:
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere(theta, phi, Ex, Ey,Ez, bfreq = True, pattern = True )
Ft = Fth.transpose()
Fp = Fph.transpose()
Ft = Ft.reshape(self.nth, self.nph,self.nf)
Fp = Fp.reshape(self.nth, self.nph,self.nf)
else:
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere(theta, phi, Ex, Ey,Ez, bfreq = True, pattern = False)
Ft = Fth.transpose()
Fp = Fph.transpose()
assert(Ft.shape[-1]==self.nf)
assert(Fp.shape[-1]==self.nf)
return Ft,Fp
def __pwireplate(self,**kwargs):
""" pattern wire plate antenna
"""
defaults = {'param':{'t0' : 5*np.pi/6,
'GmaxdB': 5
}}
if 'param' not in kwargs or kwargs['param']=={}:
kwargs['param']=defaults['param']
self.typ='wireplate'
self.param = kwargs['param']
t0 = self.param['t0']
GmaxdB = self.param['GmaxdB']
Gmax = pow(GmaxdB/10.,10)
sqGmax = np.sqrt(Gmax)
uth1 = np.where(self.theta < t0)[0]
uth2 = np.where(self.theta >= t0)[0]
p = t0
q = np.pi/2.
A = np.array(([[3*p**2,2*p,1],[p**3,p**2,p],[q**3,q**2,q]]))
Y = np.array(([0,1,1/(1.*sqGmax)]))
self.poly = la.solve(A,Y)
argth1 = np.abs(self.poly[0]*self.theta[uth1]**3
+ self.poly[1]*self.theta[uth1]**2
+ self.poly[2]*self.theta[uth1])
argth2 = -(1/(np.pi-t0)**2)*(self.theta[uth2]-t0)**2+1
argth = np.hstack((argth1,argth2))[::-1]
if self.grid:
Ft = sqGmax * (argth[:,None])
Fp = sqGmax * (argth[:,None])
else:
Fat = sqGmax * argth
Fap = sqGmax * argth
Ft = np.dot(Fat[:,None],np.ones(len(self.fGHz))[None,:])
Fp = np.dot(Fap[:,None],np.ones(len(self.fGHz))[None,:])
return Ft,Fp
def __pcst(self,**kwargs):
""" read antenna in text format
"""
defaults = {'param':{'p' : 2,
'directory':'ant/FF_Results_txt_port_1_2/',
'fGHz':np.arange(2,6.5,0.5)}}
if 'param' not in kwargs or kwargs['param']=={}:
param=defaults['param']
else:
param=kwargs['param']
self.fGHz = param['fGHz']
self.nf = len(self.fGHz)
for f in param['fGHz']:
if ((int(f*10))%10)==0:
_filename1 = 'E_port'+str(param['p'])+'_f'+str(int(f))+'GHz.txt'
_filename2 = 'E_port'+str(param['p'])+'_f'+str(int(f))+'Ghz.txt'
# print 'toto'
else:
_filename1 = 'E_port'+str(param['p'])+'_f'+str(f)+'GHz.txt'
_filename2 = 'E_port'+str(param['p'])+'_f'+str(f)+'Ghz.txt'
filename1 = pyu.getlong(_filename1, param['directory'])
filename2 = pyu.getlong(_filename2, param['directory'])
try:
df = pd.read_csv(filename1,sep=';')
except:
df = pd.read_csv(filename2,sep=';')
columns = df.columns
theta = (df[columns[0]]*np.pi/180).values.reshape(72,37)
phi = (df[columns[1]]*np.pi/180).values.reshape(72,37)
modGrlzdB = df[columns[2]]
mFt = df[columns[3]]
pFt = df[columns[4]]
mFp = df[columns[5]]
pFp = df[columns[6]]
ratiodB = df[columns[7]]
Ft = (10**(mFt/20)*np.exp(1j*pFt*np.pi/180)).values.reshape(72,37)
Fp = (10**(mFp/20)*np.exp(1j*pFp*np.pi/180)).values.reshape(72,37)
Ft = Ft.swapaxes(0,1)
Fp = Fp.swapaxes(0,1)
try:
tFt=np.concatenate((tFt,Ft[...,None]),axis=2)
tFp=np.concatenate((tFp,Fp[...,None]),axis=2)
except:
tFt=Ft[...,None]
tFp=Fp[...,None]
self.phi = phi[:,0]
self.theta = theta[0,:]
self.nth = len(self.theta)
self.nph = len(self.phi)
Ft = tFt
Fp = tFp
return Ft,Fp
    def __pHertz(self,**kwargs):
        """ Hertz dipole

        Radiating functions of an ideal Hertz (infinitesimal) dipole.

        Parameters
        ----------

        param : dict
            le : np.array (3,)
                unit vector along the electric current (default z axis)

        Returns
        -------

        Ft, Fp : np.array
            if self.grid : Nth x Nph x 1
            else         : Nd x 1

        """
        defaults = {'param':{'le':np.array([0,0,1])}}

        if 'param' not in kwargs or kwargs['param']=={}:
            kwargs['param']=defaults['param']

        #k = 2*np.pi*self.fGHz[None,None,None,:]/0.3
        param=kwargs['param']
        if self.grid:
            le = param['le'][:,None,None]
            # spherical unit vectors on the grid : each is 3 x Nth x Nph
            # r : radial
            xr = np.sin(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
            yr = np.sin(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
            zr = np.cos(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
            r = np.concatenate((xr,yr,zr),axis=0)
            # ph : azimuthal
            xp = -np.sin(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
            yp = np.cos(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
            zp = np.zeros(len(self.phi))[None,None,:]*np.ones(len(self.theta))[None,:,None]
            ph = np.concatenate((xp,yp,zp),axis=0)
            # th : elevation
            xt = np.cos(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
            yt = np.cos(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
            zt = -np.sin(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
            th = np.concatenate((xt,yt,zt),axis=0)
            # project the current direction onto the plane transverse to r
            vec = le - np.einsum('kij,kij->ij',le,r)[None,...]*r
            #G = 1j*30*k*vec
            # sqrt(3/2) : Hertz dipole directivity normalization
            Ft = np.sqrt(3/2.)*np.einsum('kij,kij->ij',vec,th)[...,None]
            Fp = np.sqrt(3/2.)*np.einsum('kij,kij->ij',vec,ph)[...,None]
        else:
            le = param['le'][:,None]
            # NOTE(review): this branch concatenates along axis 0, which only
            # yields a 3 x Nd stacking if self.theta / self.phi are shaped
            # (1,Nd) -- confirm against callers of the non-grid evaluation
            xr = np.sin(self.theta)*np.cos(self.phi)
            yr = np.sin(self.theta)*np.sin(self.phi)
            zr = np.cos(self.theta)
            r = np.concatenate((xr,yr,zr),axis=0)
            xp = -np.sin(self.phi)
            yp = np.cos(self.phi)
            zp = np.zeros(len(self.phi))
            ph = np.concatenate((xp,yp,zp),axis=0)
            xt = np.cos(self.theta)*np.cos(self.phi)
            yt = np.cos(self.theta)*np.sin(self.phi)
            zt = -np.sin(self.theta)
            th = np.concatenate((xt,yt,zt),axis=0)
            # project the current direction onto the plane transverse to r
            vec = le - np.einsum('ki,ki->i',le,r)[None,...]*r
            #G = 1j*30*k*vec
            Ft = np.sqrt(3/2.)*np.einsum('ki,ki->i',vec,th)[...,None]
            Fp = np.sqrt(3/2.)*np.einsum('ki,ki->i',vec,ph)[...,None]

        return Ft,Fp
    def __pHuygens(self,**kwargs):
        """ Huygens source

        Radiating functions of a Huygens source (crossed electric current
        and aperture normal).

        Parameters
        ----------

        param : dict
            le : np.array (3,)
                direction of electric current
            n : np.array (3,)
                normal to aperture

        Returns
        -------

        Ft, Fp : np.array
            if self.grid : Nth x Nph x 1
            else         : Nd x 1

        """
        defaults = {'param':{'le':np.array([0,0,1]),
                             'n':np.array([1,0,0])}}

        if 'param' not in kwargs or kwargs['param']=={}:
            kwargs['param']=defaults['param']

        #k = 2*np.pi*self.fGHz[None,None,None,:]/0.3
        param=kwargs['param']
        if self.grid:
            le = param['le'][:,None,None]
            n  = param['n'][:,None,None]
            # spherical unit vectors on the grid : each is 3 x Nth x Nph
            xr = np.sin(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
            yr = np.sin(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
            zr = np.cos(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
            r = np.concatenate((xr,yr,zr),axis=0)
            xp = -np.sin(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
            yp = np.cos(self.phi)[None,None,:]*np.ones(len(self.theta))[None,:,None]
            zp = np.zeros(len(self.phi))[None,None,:]*np.ones(len(self.theta))[None,:,None]
            ph = np.concatenate((xp,yp,zp),axis=0)
            xt = np.cos(self.theta)[None,:,None]*np.cos(self.phi)[None,None,:]
            yt = np.cos(self.theta)[None,:,None]*np.sin(self.phi)[None,None,:]
            zt = -np.sin(self.theta)[None,:,None]*np.ones(len(self.phi))[None,None,:]
            th = np.concatenate((xt,yt,zt),axis=0)
            # electric current term transverse to r
            vec1 = le - np.einsum('kij,kij->ij',le,r)[None,...]*r
            # magnetic current term : (le x n) x r
            cro1 = np.cross(le,n,axisa=0,axisb=0,axisc=0)
            vec2 = np.cross(cro1,r,axisa=0,axisb=0,axisc=0)
            vec = vec1-vec2
            #G = 1j*30*k*vec
            # NOTE(review): normalization is sqrt(3/4) here but sqrt(3) in
            # the non-grid branch below -- confirm this asymmetry is intended
            Ft = np.sqrt(3/4.)*np.einsum('kij,kij->ij',vec,th)[...,None]
            Fp = np.sqrt(3/4.)*np.einsum('kij,kij->ij',vec,ph)[...,None]
            #Ft = np.einsum('kij,kij->ij',vec,th)[...,None]
            #Fp = np.einsum('kij,kij->ij',vec,ph)[...,None]
        else:
            le = param['le'][:,None]
            # NOTE(review): axis-0 concatenation only yields a 3 x Nd stacking
            # if self.theta / self.phi are shaped (1,Nd) -- confirm
            xr = np.sin(self.theta)*np.cos(self.phi)
            yr = np.sin(self.theta)*np.sin(self.phi)
            zr = np.cos(self.theta)
            r = np.concatenate((xr,yr,zr),axis=0)
            xp = -np.sin(self.phi)
            yp = np.cos(self.phi)
            zp = np.zeros(len(self.phi))
            ph = np.concatenate((xp,yp,zp),axis=0)
            xt = np.cos(self.theta)*np.cos(self.phi)
            yt = np.cos(self.theta)*np.sin(self.phi)
            zt = -np.sin(self.theta)
            th = np.concatenate((xt,yt,zt),axis=0)
            vec1 = le - np.einsum('ki,ki->i',le,r)[None,...]*r
            cro1 = np.cross(le,n,axisa=0,axisb=0,axisc=0)
            vec2 = np.cross(cro1,r,axisa=0,axisb=0,axisc=0)
            vec = vec1-vec2
            #G = 1j*30*k*vec
            Ft = np.sqrt(3)*np.einsum('ki,ki->i',vec,th)[...,None]
            Fp = np.sqrt(3)*np.einsum('ki,ki->i',vec,ph)[...,None]

        return Ft,Fp
    def __pArray(self,**kwargs):
        """ Array factor

        Evaluates the pattern of the antenna array : each element pattern in
        self.la is weighted (self.w), coupled (Sc) and phase shifted
        according to the element positions self.p.

        Parameters
        ----------

        Sc : np.array
            coupling S matrix

        Notes
        -----

        Nd : Number of directions
        Np : Number of points (antenna elements)
        Nf : Number of frequency
        Nb : Number of beams

        """
        defaults = {'param':{'Sc':[]}}

        if 'param' not in kwargs or kwargs['param']=={}:
            kwargs['param']=defaults['param']

        self.param = kwargs['param']

        # wavelength and wavenumber per frequency
        lamda = (0.3/self.fGHz)
        k = 2*np.pi/lamda

        if self.grid:
            sx = np.sin(self.theta[:,None])*np.cos(self.phi[None,:])    # Ntheta x Nphi
            sy = np.sin(self.theta[:,None])*np.sin(self.phi[None,:])    # Ntheta x Nphi
            sz = np.cos(self.theta[:,None])*np.ones(len(self.phi))[None,:]  # Ntheta x Nphi
            sx = sx.reshape(self.nth*self.nph)
            sy = sy.reshape(self.nth*self.nph)
            sz = sz.reshape(self.nth*self.nph)
        else:
            sx = np.sin(self.theta)*np.cos(self.phi)   # ,Nd
            sy = np.sin(self.theta)*np.sin(self.phi)   # ,Nd
            sz = np.cos(self.theta)                    # ,Nd

        # unit direction vectors
        self.s = np.vstack((sx,sy,sz)).T   # Nd x 3

        #
        # F = exp(+jk s.p)
        #

        # flatten the element positions to 3 x Np
        lshp = np.array(self.p.shape)
        if len(lshp)>2:
            Np = np.prod(lshp[1:])
            p = self.p.reshape(3,Np)
        else:
            p = self.p

        Np = p.shape[1]

        self.Sc = self.param['Sc']
        if self.Sc==[]:
            # Sc : Np x Np x Nf
            # default : identity (no coupling between elements)
            self.Sc = np.eye(Np)[...,None]
            #Sc2 = np.random.rand(Np,Np)[...,None]
            #pdb.set_trace()

        #
        # Get the weights
        #
        # w : b x a x f
        lshw = np.array(self.w.shape)
        if len(lshw)>2:
            Np2 = np.prod(lshw[0:-1])
            assert(Np2==Np)
            w = self.w.reshape(Np,lshw[-1])
        else:
            w = self.w

        # s : Nd x 3
        # p : 3 x Np
        #
        # sdotp : Nd x Np
        sdotp = np.dot(self.s,p)   # s . p

        # evaluate each element pattern on the requested directions
        for a in self.la:
            if not self.grid:
                a.eval(grid=self.grid,ph=self.phi,th=self.theta)
            else:
                a.eval(grid=self.grid)

            # aFt : Nt x Np x Nf |Nd x Nf
            # aFp : Nt x Np x Nf |Nd x Nf
            aFt = a.Ft
            aFp = a.Fp

        #
        # Force conversion to Nd x Nf
        #
        shF = aFt.shape
        aFt = aFt.reshape(np.prod(shF[0:-1]),shF[-1])
        aFp = aFp.reshape(np.prod(shF[0:-1]),shF[-1])

        #
        # Same pattern on each point
        #
        aFt = aFt[:,None,:]
        aFp = aFp[:,None,:]

        #
        #  Nf : frequency
        #  Nd : direction
        #  Np : points or array antenna element position
        #  Nb : number of beams
        #
        #  w   : Np x Nf
        #  Sc  : Np x Np x Nf
        #
        #
        #  w' = w.Sc  Np x Nf
        #
        # Coupling is implemented here
        # Rules : The repeated index k is the common dimension of the product
        # w    : Np(k) x Nf(i)
        # Sc   : Np(k) x Np(m) x Nf(i)
        # wp   : Np(m) x Nf(i)
        wp = np.einsum('ki,kmi->mi',w,self.Sc)

        # add direction axis (=0) in w

        #if len(.w.shape)==3:
        #    self.wp = self.wp[None,:,:,:]

        # aFT : Nd x Np x Nf
        # E   : Nd x Np x Nf
        # steering phase term exp(+jk s.p)
        E = np.exp(1j*k[None,None,:]*sdotp[:,:,None])

        #
        # wp : Np x Nf
        # Fp : Nd x Np x Nf
        # Ft : Nd x Np x Nf
        #
        Ft = wp[None,...]*aFt*E
        Fp = wp[None,...]*aFp*E

        if self.grid:
            #
            # Integrate over the Np points (axis =1)
            # only if self.grid
            # Fp : Nd x Nf
            # Ft : Nd x Nf
            #
            Ft = np.sum(Ft,axis=1)
            Fp = np.sum(Fp,axis=1)
            sh = Ft.shape
            Ft = Ft.reshape(self.nth,self.nph,sh[1])
            Fp = Fp.reshape(self.nth,self.nph,sh[1])

        return Ft,Fp
def radF(self):
""" evaluate radiation fonction w.r.t polarization
self.pol : 't' : theta , 'p' : phi n, 'c' : circular
"""
assert self.pol in ['t','p','c']
if self.pol=='p':
Fp = self.sqG
if len(self.sqG.shape)==3:
Ft = np.array([0])*np.ones(len(self.fGHz))[None,None,:]
else:
Ft = np.array([0])*np.ones(len(self.fGHz))[None,:]
if self.pol=='t':
if len(self.sqG.shape)==3:
Fp = np.array([0])*np.ones(len(self.fGHz))[None,None,:]
else:
Fp = np.array([0])*np.ones(len(self.fGHz))[None,:]
Ft = self.sqG
if self.pol=='c':
Fp = (1./np.sqrt(2))*self.sqG
Ft = (1j/np.sqrt(2))*self.sqG
return Ft,Fp
    def gain(self):
        """ calculates antenna gain

        Returns
        -------

        self.G  : np.array(Nt,Np,Nf) dtype:float
            linear gain
                or np.array(Nr,Nf)
        self.sqG : np.array(Nt,Np,Nf) dtype:float
            linear square root of gain
                or np.array(Nr,Nf)
        self.efficiency : np.array (,Nf) dtype:float
            efficiency
        self.hpster : np.array (,Nf) dtype:float
            half power solid angle :  1 ~ 4pi steradian
        self.ehpbw : np.array (,Nf) dtyp:float
            equivalent half power beamwidth (radians)

        Notes
        -----

        .. math:: G(\\theta,\\phi) = |F_{\\theta}|^2 + |F_{\\phi}|^2

        """
        # gain is the squared magnitude of both field components
        self.G = np.real( self.Fp * np.conj(self.Fp)
                         +  self.Ft * np.conj(self.Ft) )

        if self.grid:
            # angular steps of the (theta,phi) grid
            dt = self.theta[1]-self.theta[0]
            dp = self.phi[1]-self.phi[0]
            Nt = len(self.theta)
            Np = len(self.phi)
            # weight by sin(theta) for the solid angle integration
            Gs = self.G*np.sin(self.theta)[:,None,None]*np.ones(Np)[None,:,None]
            # integral of the gain over the sphere / 4pi
            self.efficiency = np.sum(np.sum(Gs,axis=0),axis=0)*dt*dp/(4*np.pi)

            self.sqG = np.sqrt(self.G)
            self.GdB = 10*np.log10(self.G)
            # GdBmax (,Nf)
            # Get direction of Gmax and get the polarisation state in that direction
            #
            self.GdBmax = np.max(np.max(self.GdB,axis=0),axis=0)
            # index (theta,phi) of the maximum at the first frequency
            self.umax = np.array(np.where(self.GdB==self.GdBmax))[:,0]
            self.theta_max = self.theta[self.umax[0]]
            self.phi_max = self.phi[self.umax[1]]
            M = geu.SphericalBasis(np.array([[self.theta_max,self.phi_max]]))
            # sl : propagation direction at the gain maximum
            self.sl = M[:,2].squeeze()
            uth = M[:,0]
            uph = M[:,1]
            el = self.Ft[tuple(self.umax)]*uth + self.Fp[tuple(self.umax)]*uph
            eln = el/np.linalg.norm(el)
            # el, hl : electric / magnetic polarization unit vectors at Gmax
            self.el = np.abs(eln.squeeze())
            self.hl = np.cross(self.sl,self.el)

            #assert((self.efficiency<1.0).all()),pdb.set_trace()
            # half power solid angle and equivalent beamwidth per frequency
            self.hpster=np.zeros(len(self.fGHz))
            self.ehpbw=np.zeros(len(self.fGHz))
            for k in range(len(self.fGHz)):
                U = np.zeros((Nt,Np))
                A = self.GdB[:,:,k]*np.ones(Nt)[:,None]*np.ones(Np)[None,:]
                u = np.where(A>(self.GdBmax[k]-3))
                U[u] = 1
                V  = U*np.sin(self.theta)[:,None]
                self.hpster[k] = np.sum(V)*dt*dp/(4*np.pi)
                self.ehpbw[k] = np.arccos(1-2*self.hpster[k])
        else:
            self.sqG = np.sqrt(self.G)
            self.GdB = 10*np.log10(self.G)
    def plotG(self,**kwargs):
        """ antenna plot gain in 2D

        Parameters
        ----------

        fGHz : list of float
            frequencies to display (nearest evaluated frequency is used)
        plan : 'theta' | 'phi' depending on the selected plan to be displayed
        angdeg : phi or theta in degrees, if plan=='phi' it corresponds to theta
        GmaxdB :  max gain to be displayed (dB)
        polar : boolean
            polar (True) or cartesian (False) display
        dyn : int
            dynamic range of the plot (dyn * 5 dB)

        Returns
        -------

        fig
        ax

        Examples
        --------

        .. plot::
            :include-source:

            >>> import matplotlib.pyplot as plt
            >>> from pylayers.antprop.antenna import *
            >>> A = Antenna('defant.vsh3')
            >>> fig,ax = A.plotG(fGHz=[2,3,4],plan='theta',angdeg=0)
            >>> fig,ax = A.plotG(fGHz=[2,3,4],plan='phi',angdeg=90)

        """
        # the pattern must be evaluated before plotting
        if not self.evaluated:
            self.eval(pattern=True)

        dtr = np.pi/180.

        defaults = {'fGHz' : [],
                    'dyn' : 8 ,
                    'plan': 'phi',
                    'angdeg' : 90,
                    'legend':True,
                    'GmaxdB':20,
                    'polar':True,
                    'topos':False,
                    'source':'satimo',
                    'show':True,
                    'mode':'index',
                    'color':'black',
                    'u':0,
                    }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        args = {}
        for k in kwargs:
            if k not in defaults:
                args[k] = kwargs[k]

        if 'fig' not in kwargs:
            fig = plt.figure(figsize=(8, 8))
        else:
            fig = kwargs['fig']

        if 'ax' not in kwargs:
            #ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True, facecolor='#d5de9c')
            if kwargs['polar']:
                ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True )
            else:
                ax = fig.add_subplot(111)
        else:
            ax = kwargs['ax']

        # u : extra axis index when sqG is 4 dimensional
        u = kwargs['u']
        rc('grid', color='#316931', linewidth=1, linestyle='-')
        rc('xtick', labelsize=15)
        rc('ytick', labelsize=15)

        DyndB = kwargs['dyn'] * 5
        GmindB = kwargs['GmaxdB'] - DyndB
        #print "DyndB",DyndB
        #print "GmindB",GmindB

        # force square figure and square axes looks better for polar, IMO
        t1 = np.arange(5, DyndB + 5, 5)
        t2 = np.arange(GmindB + 5, kwargs['GmaxdB'] + 5, 5)

        col = ['k', 'r', 'g', 'b', 'm', 'c', 'y']
        cpt = 0

        #if len(self.fGHz) > 1 :
        #    fstep = self.fGHz[1]-self.fGHz[0]
        #else :
        #    fstep = np.array((abs(self.fGHz-kwargs['fGHz'][0])+1))
        #dtheta = self.theta[1,0]-self.theta[0,0]
        #dphi = self.phi[0,1]-self.phi[0,0]
        dtheta = self.theta[1]-self.theta[0]
        dphi = self.phi[1]-self.phi[0]

        if kwargs['fGHz']==[]:
            lfreq = [self.fGHz[0]]
        else:
            lfreq = kwargs['fGHz']

        for f in lfreq:
            # select the evaluated frequency closest to the requested one
            df = abs(self.fGHz-f)
            ik0 = np.where(df==min(df))
            ik = ik0[0][0]
            #ik=0
            chaine = 'f = %3.2f GHz' %(self.fGHz[ik])
            # all theta
            if kwargs['plan']=='theta':
                itheta = np.arange(self.nth)
                iphi1 = np.where(abs(self.phi-kwargs['angdeg']*dtr)<dphi)[0][0]
                Np = self.nph

                # 0 <= theta <= pi/2
                u1 = np.where((self.theta <= np.pi / 2.) & (self.theta >= 0))[0]
                # 0 < theta < pi
                u2 = np.arange(self.nth)
                # pi/2 < theta <= pi
                u3 = np.nonzero((self.theta <= np.pi) & ( self.theta > np.pi / 2))[0]

                #
                # handle broadcasted axis =1 --> index 0
                shsqG = self.sqG.shape

                if shsqG[0]==1:
                    u1 = 0
                    u2 = 0
                    u3 = 0

                if shsqG[1]==1:
                    iphi1 = 0
                    iphi2 = 0

                if len(shsqG)==3:   # if only one frequency point
                    if shsqG[2]==1:
                        ik = 0
                else:
                    if shsqG[3]==1:
                        ik = 0

                # handle parity
                # iphi2 : cut at angdeg + 180 degrees
                if np.mod(Np, 2) == 0:
                    iphi2 = np.mod(iphi1 + Np / 2, Np)
                else:
                    iphi2 = np.mod(iphi1 + (Np - 1) / 2, Np)

                if len(shsqG)==3:
                    arg1 = (u1,iphi1,ik)
                    arg2 = (u2,iphi2,ik)
                    arg3 = (u3,iphi1,ik)
                else:
                    if shsqG[3]==1:
                        u = 0
                    arg1 = (u1,iphi1,u,ik)
                    arg2 = (u2,iphi2,u,ik)
                    arg3 = (u3,iphi1,u,ik)

                # polar diagram
                #pdb.set_trace()
                if kwargs['polar']:
                    if kwargs['source']=='satimo':
                        r1 = -GmindB + 20 * np.log10( self.sqG[arg1]+1e-12)
                        r2 = -GmindB + 20 * np.log10( self.sqG[arg2]+1e-12)
                        r3 = -GmindB + 20 * np.log10( self.sqG[arg3]+1e-12)
                        #print max(r1)+GmindB
                        #print max(r2)+GmindB
                        #print max(r3)+GmindB
                    if kwargs['source']=='cst':
                        # cst patterns are scaled by sqrt(30)
                        r1 = -GmindB + 20 * np.log10( self.sqG[arg1]/np.sqrt(30)+1e-12)
                        r2 = -GmindB + 20 * np.log10( self.sqG[arg2]/np.sqrt(30)+1e-12)
                        r3 = -GmindB + 20 * np.log10( self.sqG[arg3]/np.sqrt(30)+1e-12)

                    if type(r1)!= np.ndarray:
                        r1 = np.array([r1])*np.ones(len(self.phi))
                    if type(r2)!= np.ndarray:
                        r2 = np.array([r2])*np.ones(len(self.phi))
                    if type(r3)!= np.ndarray:
                        r3 = np.array([r3])*np.ones(len(self.phi))

                    # clip negative values (below the displayed dynamic)
                    negr1 = np.nonzero(r1 < 0)
                    negr2 = np.nonzero(r2 < 0)
                    negr3 = np.nonzero(r3 < 0)

                    r1[negr1[0]] = 0
                    r2[negr2[0]] = 0
                    r3[negr3[0]] = 0

                    # build the closed 0..2pi contour from the three sectors
                    r = np.hstack((r1[::-1], r2, r3[::-1], r1[-1]))

                    a1 = np.arange(0, 360, 30)
                    a2 = [90, 60, 30, 0, 330, 300, 270, 240, 210, 180, 150, 120]
                    rline2, rtext2 = plt.thetagrids(a1, a2)

                # linear diagram
                else:
                    r1 = 20 * np.log10( self.sqG[arg1]+1e-12)
                    r2 = 20 * np.log10( self.sqG[arg2]+1e-12)
                    r3 = 20 * np.log10( self.sqG[arg3]+1e-12)

                    r = np.hstack((r1[::-1], r2, r3[::-1], r1[-1]))

                # angular basis for phi
                angle = np.linspace(0, 2 * np.pi, len(r), endpoint=True)
                plt.title(u'$\\theta$ plane')

            if kwargs['plan']=='phi':
                iphi = np.arange(self.nph)
                itheta = np.where(abs(self.theta-kwargs['angdeg']*dtr)<dtheta)[0][0]
                angle = self.phi[iphi]
                if len(self.sqG.shape)==3:
                    arg = [itheta,iphi,ik]
                else:
                    arg = [itheta,iphi,u,ik]

                if kwargs['polar']:
                    if np.prod(self.sqG.shape)!=1:
                        r = -GmindB + 20 * np.log10(self.sqG[arg])
                        neg = np.nonzero(r < 0)
                        r[neg] = 0
                    else:
                        r = -GmindB+ 20*np.log10(self.sqG[0,0,0]*np.ones(np.shape(angle)))
                    # plt.title(u'H plane - $\phi$ degrees')
                    a1 = np.arange(0, 360, 30)
                    a2 = [0, 30, 60, 90, 120 , 150 , 180 , 210, 240 , 300 , 330]
                    #rline2, rtext2 = plt.thetagrids(a1, a2)
                else:
                    r = 20 * np.log10(self.sqG[arg])
                    plt.title(u'$\\phi$ plane ')

            # actual plotting
            if len(lfreq)>1:
                ax.plot(angle, r, color=col[cpt], lw=2, label=chaine)
            else:
                ax.plot(angle, r, color=kwargs['color'], lw=2, label=chaine)
            cpt = cpt + 1

        if kwargs['polar']:
            rline1, rtext1 = plt.rgrids(t1, t2)
            #ax.set_rmax(t1[-1])
            #ax.set_rmin(t1[0])
        if kwargs['legend']:
            ax.legend()
        if kwargs['show']:
            plt.ion()
            plt.show()

        return(fig,ax)
class Antenna(Pattern):
""" Antenna
Attributes
----------
name : Antenna name
nf : number of frequency
nth : number of theta
nph : number of phi
Ft : Normalized Ftheta (ntheta,nphi,nf)
Fp : Normalized Fphi (ntheta,nphi,nf)
sqG : square root of gain (ntheta,nphi,nf)
theta : theta base 1 x ntheta
phi : phi base 1 x phi
C : VSH Coefficients
Methods
-------
info : Display information about antenna
vsh : calculates Vector Spherical Harmonics
show3 : Geomview diagram
plot3d : 3D diagram plotting using matplotlib toolkit
Antenna trx file can be stored in various order
natural : HFSS
ncp : near filed chamber
It is important when initializing an antenna object
to be aware of the typ of trx file
.trx (ASCII Vectorial antenna Pattern)
F Phi Theta Fphi Ftheta
"""
    def __init__(self,typ='Omni',**kwargs):
        """ class constructor

        Parameters
        ----------

        typ  : 'Omni','Gauss','WirePlate','3GPP','atoll'
            or an antenna file name (extension selects the loader)

        _filename : string
                    antenna file name
        directory : str
                    antenna subdirectory of the current project
                    the file is seek in the $BASENAME/ant directory
        nf        : integer
                     number of frequency
        ntheta    : integer
                    number of theta (default 181)
        nphi      : integer
                    number of phi (default 90)
        source    : string
                source of data { 'satimo' | 'cst' | 'hfss' }

        Notes
        -----

        The supported data formats for storing antenna patterns are

        'mat': Matlab File
        'vsh2': unthresholded vector spherical coefficients
        'vsh3': thresholded vector spherical cpoefficients
        'atoll': Atoll antenna file format
        'trx' : Satimo NFC raw data
        'trx1' : Satimo NFC raw data  (deprecated)

        A = Antenna('my_antenna.mat')

        """
        defaults = {'directory': 'ant',
                    'source':'satimo',
                    'ntheta':90,
                    'nphi':181,
                    'L':90, # L max
                    'param':{}
                    }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        if 'fGHz' in kwargs:
            # accept a scalar frequency as well as an array
            if type(kwargs['fGHz'])==np.ndarray:
                self.fGHz=kwargs['fGHz']
            else:
                self.fGHz=np.array([kwargs['fGHz']])

        #mayavi selection
        self._is_selected=False

        self.source = kwargs['source']
        self.param = kwargs['param']

        #super(Antenna,self).__init__()
        #Pattern.__init__(self)
        #
        # if typ string has an extension it is a file
        #
        if isinstance(typ,str):
            AntennaName,Extension = os.path.splitext(typ)
            self.ext = Extension[1:]
            if self.ext=='':
                self.fromfile = False
            else:
                self.fromfile = True
        else:
            self.fromfile = True

        self.tau = 0
        self.evaluated = False
        #determine if pattern for all theta/phi is constructed
        self.full_evaluated = False

        if self.fromfile:
            if isinstance(typ,str):
                self._filename = typ
                # dispatch on the file extension
                if self.ext == 'vsh3':
                    self.typ='vsh3'
                    self.loadvsh3()
                if self.ext == 'vsh2':
                    self.typ='vsh2'
                    self.loadvsh2()
                if self.ext == 'sh3':
                    self.typ='sh3'
                    self.loadsh3()
                if self.ext == 'sh2':
                    self.typ='sh2'
                    self.loadsh2()
                if self.ext == 'trx1':
                    self.typ='trx'
                    # NOTE(review): self.nf / self.nth / self.nph are not
                    # assigned before this call -- confirm the trx1 path
                    self.load_trx(kwargs['directory'],self.nf,self.nth,self.nph)
                if self.ext == 'trx':
                    self.typ='trx'
                    self.loadtrx(kwargs['directory'])
                if self.ext == 'mat':
                    self.typ='mat'
                    self.loadmat(kwargs['directory'])
                if self.ext == 'cst':
                    self.typ='cst'
                if self.ext == 'txt':
                    self.typ='atoll'
                    self.load_atoll(kwargs['directory'])
            elif isinstance(typ,list):
                self._filename = typ
                self.ext='hfss'
                # NOTE(review): self.nth / self.nph are not assigned before
                # this call -- confirm the hfss path
                self.loadhfss(typ, self.nth, self.nph)

        else:
            self.typ=typ
            self._filename=typ
            if self.typ=='vsh3':
                self.initvsh()
            else:
                self.eval()
def initvsh(self,lmax=45):
""" Initialize a void vsh structure
Parameters
----------
fGHz : array
lmax : int
level max
"""
nf = len(self.fGHz)
Br = 1j * np.zeros((nf, lmax, lmax-1))
Bi = 1j * np.zeros((nf, lmax, lmax-1))
Cr = 1j * np.zeros((nf, lmax, lmax-1))
Ci = 1j * np.zeros((nf, lmax, lmax-1))
Br = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Br)
Bi = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Bi)
Cr = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Cr)
Ci = VCoeff(typ='s1', fmin=self.fGHz[0], fmax=self.fGHz[-1], data=Ci)
self.C = VSHCoeff(Br, Bi, Cr, Ci)
def ls(self, typ='vsh3'):
""" list the antenna files in antenna project directory
Parameters
----------
typ : string optional
{'mat'|'trx'|'vsh2'|'sh2'|'vsh3'|'sh3'}
Returns
-------
lfile_s : list
sorted list of all the .str file of strdir
"""
if typ=='vsh3':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='sh3':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='mat':
pathname = pstruc['DIRANT'] + '/*.' + typ
if typ=='trx':
pathname = pstruc['DIRANT'] + '/*.' + typ
lfile_l = glob.glob(basename+'/'+pathname)
lfile_s = []
for fi in lfile_l:
fis = pyu.getshort(fi)
lfile_s.append(fis)
lfile_s.sort()
return lfile_s
def photo(self,directory=''):
""" show a picture of the antenna
Parameters
----------
directory : string
"""
if directory == '':
directory = os.path.join('ant','UWBAN','PhotosVideos')
_filename = 'IMG_'+self.PhotoFile.split('-')[1]+'.JPG'
filename = pyu.getlong(_filename,directory)
if sys.version_info.major==2:
I = Image.open(filename)
else:
I = image.open(filename)
I.show()
def load_atoll(self,directory="ant"):
""" load antenna from Atoll file
Atoll format provides Antenna gain given for the horizontal and vertical plane
for different frequencies and different tilt values
Parameters
----------
directory : string
The dictionnary attol is created
"""
_filemat = self._filename
fileatoll = pyu.getlong(_filemat, directory)
fd = open(fileatoll)
lis = fd.readlines()
tab = []
for li in lis:
lispl= li.split('\t')
if (lispl[0]!=''):
tab.append(lispl)
deg_to_rad = np.pi/180.
lbs_to_kg = 0.45359237
columns = tab[0]
#pdb.set_trace()
for k in np.arange(len(tab)-1):
df = pd.DataFrame([tab[k+1]],columns=columns)
try:
dff=dff.append(df)
except:
dff= df
self.raw = dff
dff = dff.iloc[:,[0,8,9,10,2,5,7,14,11,16,17,13,6,12]]
#dff = df['Name','Gain (dBi)','FMin','FMax','FREQUENCY','Pattern','V_WIDTH','H_WIDTH','DIMENSIONS HxWxD (INCHES)','WEIGHT (LBS)']
dff.columns = ['Name','Fmin','Fmax','F','Gmax','G','Hpbw','H_width','V_width','HxWxD','Weight','Tilt','Etilt','Ftob']
dff=dff.apply(lambda x :pd.to_numeric(x,errors='ignore'))
#
# Parse polarization in the field name
#
upolarp45 = ['(+45)' in x for x in dff['Name']]
upolarm45 = ['(-45)' in x for x in dff['Name']]
if (sum(upolarp45)>0):
dff.loc[upolarp45,'Polar']=45
if (sum(upolarm45)>0):
dff.loc[upolarm45,'Polar']=-45
atoll = {}
dfband = dff.groupby(['Fmin'])
for b in dfband:
keyband = str(b[0])+'-'+str(b[1]['Fmax'].values[0])
atoll[keyband]={} # band
dfpol = b[1].groupby(['Polar'])
for p in dfpol:
atoll[keyband][p[0]] = {} # polar
dftilt = p[1].groupby(['Tilt'])
Ghor = np.empty((360,1)) # angle , tilt , frequency
Gver = np.empty((360,1)) # angle ,
ct = 0
tilt = []
for t in dftilt:
dffreq = t[1].groupby(['F'])
ct+=1
cf=0
tilt.append(t[0])
freq = []
for f in dffreq:
freq.append(f[0])
cf+=1
if len(f[1])==1:
df = f[1]
else:
df = f[1].iloc[0:1]
Gmax = df['Gmax'].values
str1 = df.loc[:,'G'].values[0].replace(' ',' ')
lstr = str1.split(' ')
Pattern = [ eval(x) for x in lstr[0:-1]]
# 4 fist field / # of points
Nd,db,dc,Np = Pattern[0:4]
#print(Nd,b,c,Np)
tmp = np.array(Pattern[4:4+2*Np]).reshape(Np,2)
ah = tmp[:,0]
ghor = Gmax-tmp[:,1]
# 4 fist field / # of points
da,db,dc,dd = Pattern[4+2*Np:4+2*Np+4]
#pdb.set_trace()
#print a,b,c,d
tmp = np.array(Pattern[4+2*Np+4:]).reshape(dc,2)
gver = Gmax-tmp[:,0]
av = tmp[:,1]
try:
Ghor = np.hstack((Ghor,ghor[:,None]))
Gver = np.hstack((Gver,gver[:,None]))
except:
pdb.set_trace()
Ghor = np.delete(Ghor,0,1)
Gver = np.delete(Gver,0,1)
atoll[keyband][p[0]]['hor'] = Ghor.reshape(360,ct,cf)
atoll[keyband][p[0]]['ver'] = Gver.reshape(360,ct,cf)
atoll[keyband][p[0]]['tilt'] = np.array(tilt)
atoll[keyband][p[0]]['freq'] = np.array(freq)
self.atoll = atoll
# Gmax = eval(self.df['Gain (dBi)'].values[0])
#fig = plt.figure()
#ax =plt.gca(projection='polar')
#ax =plt.gca()
#ax.plot(H2[:,1]*deg_to_rad,Gain-H2[:,0],'r',label='vertical',linewidth=2)
#ax.plot(H1[:,0]*deg_to_rad,Gain-H1[:,1],'b',label='horizontal',linewidth=2)
#ax.set_rmin(-30)
#plt.title(dir1+'/'+filename+' Gain : '+df['Gain (dBi)'].values[0])
#BXD-634X638XCF-EDIN.txt
#BXD-636X638XCF-EDIN.txt
def loadmat(self, directory="ant"):
""" load an antenna stored in a mat file
Parameters
----------
directory : str , optional
default 'ant'
Examples
--------
Read an Antenna file in UWBAN directory and plot a polar plot
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.antprop.antenna import *
>>> A = Antenna('S1R1.mat',directory='ant/UWBAN/Matfile')
>>> f,a = A.plotG(plan='theta',angdeg=0)
>>> f,a = A.plotG(plan='phi',angdeg=90,fig=f,ax=a)
>>> txt = plt.title('S1R1 antenna : st loadmat')
>>> plt.show()
"""
_filemat = self._filename
filemat = pyu.getlong(_filemat, directory)
d = io.loadmat(filemat, squeeze_me=True, struct_as_record=False)
ext = _filemat.replace('.mat', '')
d = d[ext]
#
#
#
self.typ = 'mat'
self.Date = str(d.Date)
self.Notes = str(d.Notes)
self.PhotoFile = str(d.PhotoFile)
self.Serie = eval(str(d.Serie))
self.Run = eval(str(d.Run))
self.DataFile = str(d.DataFile)
self.StartTime = str(d.StartTime)
self.AntennaName = str(d.AntennaName)
self.fGHz = d.freq/1.e9
self.theta = d.theta
self.phi = d.phi
self.Ft = d.Ftheta
self.Fp = d.Fphi
self.Fp = self.Fp.swapaxes(0, 2)
self.Fp = self.Fp.swapaxes(0, 1)
self.Ft = self.Ft.swapaxes(0, 2)
self.Ft = self.Ft.swapaxes(0, 1)
Gr = np.real(self.Fp * np.conj(self.Fp) + \
self.Ft * np.conj(self.Ft))
self.sqG = np.sqrt(Gr)
self.nth = len(self.theta)
self.nph = len(self.phi)
if type(self.fGHz) == float:
self.nf = 1
else:
self.nf = len(self.fGHz)
self.evaluated = True
self.grid = True
    def load_trx(self, directory="ant", nf=104, ntheta=181, nphi=90, ncol=6):
        """ load a trx file (deprecated)

        Parameters
        ----------
        directory : str
            directory where is located the trx file (default : ant)
        nf : float
            number of frequency points
        ntheta : float
            number of theta
        nphi : float
            number of phi
        ncol : int
            number of tab-separated columns expected per data line
            (used only to build the header-skipping regex)

        TODO : DEPRECATED (Fix the Ft and Fp format with Nf as last axis)
        """
        _filetrx = self._filename
        filename = pyu.getlong(_filetrx, directory)
        # regex matching a full data line; used to skip the textual header
        # NOTE(review): both branches build a pattern with 7 tab-separated
        # fields resp. 8 -- confirm the ncol==6 case really means 7 columns
        if ncol == 6:
            pattern = """^.*\t.*\t.*\t.*\t.*\t.*\t.*$"""
        else:
            pattern = """^.*\t.*\t.*\t.*\t.*\t.*\t.*\t.*$"""
        fd = open(filename, 'r')
        d = fd.read().split('\r\n')
        fd.close()
        k = 0
        # skip header lines until the first line matching a data row
        #while ((re.search(pattern1,d[k]) is None ) & (re.search(pattern2,d[k]) is None )):
        while re.search(pattern, d[k]) is None:
            k = k + 1
        d = d[k:]
        N = len(d)
        # drop trailing empty line produced by the final '\r\n'
        del d[N - 1]
        r = '\t'.join(d)
        r.replace(' ', '')
        d = np.array(r.split()).astype('float')
        #
        # TODO Parsing the header (nf/nphi/ntheta are currently trusted
        # keyword arguments instead of being read from the file)
        #
        N = nf * nphi * ntheta
        # columns: f phi theta ReFphi ImFphi ReFtheta ImFtheta
        d = d.reshape(N, 7)
        F = d[:, 0]
        PHI = d[:, 1]
        THETA = d[:, 2]
        Fphi = d[:, 3] + d[:, 4] * 1j
        Ftheta = d[:, 5] + d[:, 6] * 1j
        self.Fp = Fphi.reshape((nf, nphi, ntheta))
        self.Ft = Ftheta.reshape((nf, nphi, ntheta))
        Ttheta = THETA.reshape((nf, nphi, ntheta))
        Tphi = PHI.reshape((nf, nphi, ntheta))
        Tf = F.reshape((nf, nphi, ntheta))
        # reorder to (f, theta, phi)
        self.Fp = self.Fp.swapaxes(1, 2)
        self.Ft = self.Ft.swapaxes(1, 2)
        Ttheta = Ttheta.swapaxes(1, 2)
        Tphi = Tphi.swapaxes(1, 2)
        Tf = Tf.swapaxes(1, 2)
        self.fGHz = Tf[:, 0, 0]
        self.theta = Ttheta[0, :, 0]
        #self.phi = Tphi[0,0,:]
        #
        # Temporary fix: the file stores phi in [0,2pi) split around 90/181;
        # re-stitch halves and rebuild regular theta/phi grids
        #
        A1 = self.Fp[:, 90:181, :]
        A2 = self.Fp[:, 0:91, :]
        self.Fp = np.concatenate((A1, A2[:, ::-1, :]), axis=2)
        A1 = self.Ft[:, 90:181, :]
        A2 = self.Ft[:, 0:91, :]
        self.Ft = np.concatenate((A1, A2[:, ::-1, :]), axis=2)
        self.theta = np.linspace(0, np.pi, 91)
        self.phi = np.linspace(0, 2 * np.pi, 180, endpoint=False)
        self.nth = 91
        self.nph = 180
        self.nf = 104
        self.evaluated = True
    def pattern(self,theta=[],phi=[],typ='s3'):
        """ return multidimensionnal radiation patterns

        Parameters
        ----------
        theta : array
            1xNt (default : 30 points over [0,pi])
        phi : array
            1xNp (default : 60 points over [0,2pi])
        typ : string
            {s1|s2|s3} : spherical-harmonics shape used for synthesis

        Returns
        -------
        FTh, FPh : Nf x Nt x Np complex patterns (grid mode is forced)

        Notes
        -----
        side effect : sets self.grid = True
        """
        # NOTE(review): the `== []` test only works for the default list
        # arguments; an empty ndarray argument would not take this branch
        if theta == []:
            theta = np.linspace(0,np.pi,30)
        if phi == []:
            phi = np.linspace(0,2*np.pi,60)
        self.grid = True
        Nt = len(theta)
        Np = len(phi)
        Nf = len(self.fGHz)
        #Th = np.kron(theta, np.ones(Np))
        #Ph = np.kron(np.ones(Nt), phi)
        # dispatch on the coefficient shape
        if typ =='s1':
            FTh, FPh = self.Fsynth1(theta, phi)
        if typ =='s2':
            FTh, FPh = self.Fsynth2b(theta,phi)
        if typ =='s3':
            FTh, FPh = self.Fsynth3(theta, phi)
        #FTh = Fth.reshape(Nf, Nt, Np)
        #FPh = Fph.reshape(Nf, Nt, Np)
        return(FTh,FPh)
    def coeffshow(self,**kwargs):
        """ display antenna spherical-harmonics coefficient energy

        Parameters
        ----------
        typ : string
            'ssh' |'vsh'  (which coefficient set to display)
        L : int
            maximum level (sets the imshow extent)
        kf : int
            frequency index
        vmin : float
            color scale minimum (dB)
        vmax : float
            color scale maximum (dB)
        cmap : matplotlib colormap
        dB : boolean
            if True display 10*log10 of the mode energy

        Returns
        -------
        fig, ax : matplotlib figure and main axis
        """
        defaults = {'typ':'vsh',
                    'L':20,
                    'kf':46,
                    'vmin':-40,
                    'vmax':0,
                    'cmap':cm.hot_r,
                    'dB':True
                   }
        # fill missing keyword arguments with defaults
        for k in defaults:
            if k not in kwargs:
                kwargs[k]=defaults[k]
        L  = kwargs['L']
        kf = kwargs['kf']
        # calculates mode energy (linear scale)
        # E : f , l , m
        if kwargs['typ']=='vsh':
            E  = self.C.energy(typ='s1')
        if kwargs['typ']=='ssh':
            E  = self.S.energy(typ='s1')
        # Aem : f,l  -- energy integrated over m
        Aem = np.sum(E,axis=2)
        Aem_dB = 10*np.log10(Aem)
        # Ael : f,m  -- energy integrated over l
        Ael = np.sum(E,axis=1)
        Ael_dB = 10*np.log10(Ael)
        fig, ax = plt.subplots()
        fig.set_figwidth(15)
        fig.set_figheight(10)
        # NOTE(review): when dB=False no image is drawn and the colorbar
        # call below would fail on an undefined `im` -- confirm intent
        if kwargs['dB']:
            im = ax.imshow(10*np.log10(E[kf,:,:]),
                           vmin = kwargs['vmin'],
                           vmax = kwargs['vmax'],
                           extent =[-L,L,L,0],
                           interpolation = 'nearest',
                           cmap = kwargs['cmap'])
        # side axes for the marginal (per-l / per-m) energies
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        axHistx = divider.append_axes("top", 1., pad=0.5, sharex=ax)
        axHisty = divider.append_axes("left", 1., pad=0.5, sharey=ax)
        #axHistx.bar(range(-L,L),Aem)
        #axHisty.barh(range(0,L),Ael )
        axHistx.yaxis.set_ticks(np.array([0,0.2,0.4,0.6,0.8]))
        axHisty.xaxis.set_ticks(np.array([0,0.1,0.2,0.3]))
        cbar = plt.colorbar(im, cax=cax)
        fig.tight_layout()
        plt.text(-0.02,0.6 ,'levels',
             horizontalalignment='right'
             ,verticalalignment='top', transform=ax.transAxes, rotation =90, fontsize= 15)
        plt.text(0.6,1.1 ,'free space',
             horizontalalignment='right'
             ,verticalalignment='top', transform=ax.transAxes, fontsize= 15)
        plt.text(0.55,-0.1 ,'modes',
             horizontalalignment='right'
             ,verticalalignment='top', transform=ax.transAxes, fontsize= 15)
        return fig,ax
    def errel(self,kf=-1, dsf=1, typ='s3'):
        """ calculates error between antenna pattern and reference pattern

        Parameters
        ----------
        kf : integer
            frequency index. If k=-1 integration over all frequency
        dsf : int
            down sampling factor applied to theta and phi
        typ : string
            {s1|s2|s3} synthesis shape used for the reconstruction

        Returns
        -------
        errelTh : float
            relative error on :math:`F_{\\theta}`
        errelPh : float
            relative error on :math:`F_{\phi}`
        errel : float
            global relative error (both polarizations)

        Notes
        -----
        .. math::

            \epsilon_r^{\\theta} =
            \\frac{|F_{\\theta}(\\theta,\phi)-\hat{F}_{\\theta}(\\theta)(\phi)|^2}
                 {|F_{\\theta}(\\theta,\phi)|^2}

            \epsilon_r^{\phi} =
            \\frac{|F_{\phi}(\\theta,\phi)-\hat{F}_{\phi}(\\theta)(\phi)|^2}
                 {|F_{\\theta}(\\theta,\phi)|^2}

        """
        #
        # Retrieve angular bases from the down sampling factor dsf
        #
        theta = self.theta[::dsf]
        phi = self.phi[::dsf]
        Nt = len(theta)
        Np = len(phi)
        #Th = np.kron(theta, np.ones(Np))
        #Ph = np.kron(np.ones(Nt), phi)
        # reconstruct the pattern from the stored coefficients
        if typ =='s1':
            FTh, FPh = self.Fsynth1(theta, phi)
        if typ =='s2':
            FTh, FPh = self.Fsynth2b(theta, phi)
        if typ =='s3':
            FTh, FPh = self.Fsynth3(theta, phi)
        #FTh = Fth.reshape(self.nf, Nt, Np)
        #FPh = Fph.reshape(self.nf, Nt, Np)
        #
        # Jacobian of the sphere parameterization (sin(theta) weight)
        #
        #st = outer(sin(theta),ones(len(phi)))
        st = np.sin(theta).reshape((len(theta), 1))
        #
        # Construct difference between reference and reconstructed
        #
        if kf!=-1:
            # single-frequency error
            dTh = (FTh[kf, :, :] - self.Ft[kf, ::dsf, ::dsf])
            dPh = (FPh[kf, :, :] - self.Fp[kf, ::dsf, ::dsf])
            #
            # squaring  + Jacobian
            #
            dTh2 = np.real(dTh * np.conj(dTh)) * st
            dPh2 = np.real(dPh * np.conj(dPh)) * st
            vTh2 = np.real(self.Ft[kf, ::dsf, ::dsf] \
                 * np.conj(self.Ft[kf, ::dsf, ::dsf])) * st
            vPh2 = np.real(self.Fp[kf, ::dsf, ::dsf] \
                 * np.conj(self.Fp[kf, ::dsf, ::dsf])) * st
            mvTh2 = np.sum(vTh2)
            mvPh2 = np.sum(vPh2)
            errTh = np.sum(dTh2)
            errPh = np.sum(dPh2)
        else:
            # error integrated over all frequencies
            dTh = (FTh[:, :, :] - self.Ft[:, ::dsf, ::dsf])
            dPh = (FPh[:, :, :] - self.Fp[:, ::dsf, ::dsf])
            #
            # squaring  + Jacobian
            #
            dTh2 = np.real(dTh * np.conj(dTh)) * st
            dPh2 = np.real(dPh * np.conj(dPh)) * st
            vTh2 = np.real(self.Ft[:, ::dsf, ::dsf] \
                 * np.conj(self.Ft[:, ::dsf, ::dsf])) * st
            vPh2 = np.real(self.Fp[:, ::dsf, ::dsf] \
                 * np.conj(self.Fp[:, ::dsf, ::dsf])) * st
            mvTh2 = np.sum(vTh2)
            mvPh2 = np.sum(vPh2)
            errTh = np.sum(dTh2)
            errPh = np.sum(dPh2)
        # normalize each error by the corresponding reference energy
        errelTh = (errTh / mvTh2)
        errelPh = (errPh / mvPh2)
        errel =( (errTh + errPh) / (mvTh2 + mvPh2))
        return(errelTh, errelPh, errel)
    def loadhfss(self,lfa = [], Nt=72,Np=37):
        """ load antenna from HFSS file

        Parameters
        ----------
        lfa : list
            list of antenna file names (one CSV file per frequency point)
        Nt : int
            Number of angle theta
        Np : int
            Number of angle phi

        Notes
        -----
        One file per frequency point.
        CSV columns : th , ph , abs_grlz, th_absdB, th_phase,
        ph_absdB, ph_phase, ax_ratio

        NOTE(review): assumes self.nth/self.nph are already set and
        consistent with Nt/Np -- confirm with callers.
        """
        # lfa : list file antenna
        self.nf = len(lfa)
        fGHz  = []
        lacsv = []
        Fphi = np.empty((self.nf,self.nth,self.nph))
        Ftheta = np.empty((self.nf,self.nth,self.nph))
        SqG = np.empty((self.nf,self.nth,self.nph))
        for i in range (len(lfa)):
            # frequency (GHz) parsed from the 4th-from-last char of the stem
            fGHz.append(eval(lfa[i].split('.csv')[0][-4]))
            # NOTE(review): header=False is legacy pandas usage
            # (modern pandas expects header=None) -- verify pandas version
            lacsv.append(pd.read_csv(lfa[i],
                                     header=False,
                         sep=',',
            names=['th','ph','abs_grlz','th_absdB','th_phase','ph_absdB','ph_phase','ax_ratio'],
                                     index_col=False))
            # angles are stored in degrees in the CSV
            th=lacsv[i].th.reshape(Np,Nt)*np.pi/180.
            ph=lacsv[i].ph.reshape(Np,Nt)*np.pi/180.
            Greal = lacsv[i].abs_grlz.reshape(Np,Nt)
            th_dB = lacsv[i].th_absdB.reshape(Np,Nt)
            ph_dB = lacsv[i].ph_absdB.reshape(Np,Nt)
            # dB amplitude -> linear field magnitude
            th_lin = pow(10,th_dB/20.)
            ph_lin = pow(10,ph_dB/20.)
            #th_phase = lacsv[i].th_phase.reshape(72,37)*np.pi/180.
            #ph_phase = lacsv[i].ph_phase.reshape(72,37)*np.pi/180.
            #axratio=lacsv[i].ax_ratio.reshape(72,37)
            # store as (theta, phi)
            Fphi[i,:,:] = ph_lin.swapaxes(1,0)
            Ftheta[i,:,:] = th_lin.swapaxes(1,0)
            SqG[i,:,:] = Greal.swapaxes(1,0)
        self.fGHz = np.array(fGHz)
        #self.theta = th[0,:].reshape(Nt,1)
        #self.phi = ph[:,0].reshape(1,Np)
        self.theta = th[0,:]
        self.phi = ph[:,0]
        self.Fp=Fphi
        self.Ft=Ftheta
        self.sqG=SqG
def loadtrx(self,directory):
""" load trx file (SATIMO Near Field Chamber raw data)
Parameters
----------
directory
self._filename: short name of the antenna file
the file is seek in the $BASENAME/ant directory
.. todo:
consider using an ini file for the header
Trx header structure
fmin fmax Nf phmin phmax Nphi thmin thmax Ntheta #EDelay
0 1 2 3 4 5 6 7 8 9
1 10 121 0 6.19 72 0 3.14 37 0
"""
_filetrx = self._filename
_headtrx = 'header_' + _filetrx
_headtrx = _headtrx.replace('trx', 'txt')
headtrx = pyu.getlong(_headtrx, directory)
filename = pyu.getlong(_filetrx, directory)
#
# Trx header structure
#
# fmin fmax Nf phmin phmax Nphi thmin thmax Ntheta #EDelay
# 0 1 2 3 4 5 6 7 8 9
# 1 10 121 0 6.19 72 0 3.14 37 0
#
#
foh = open(headtrx)
ligh = foh.read()
foh.close()
fmin = eval(ligh.split()[0])
fmax = eval(ligh.split()[1])
nf = eval(ligh.split()[2])
phmin = eval(ligh.split()[3])
phmax = eval(ligh.split()[4])
nphi = eval(ligh.split()[5])
thmin = eval(ligh.split()[6])
thmax = eval(ligh.split()[7])
ntheta = eval(ligh.split()[8])
#
# The electrical delay in column 9 is optional
#
try:
tau = eval(ligh.split()[9]) # tau : delay (ns)
except:
tau = 0
#
# Data are stored in 7 columns
#
# 0 1 2 3 4 5 6
# f phi th ReFph ImFphi ReFth ImFth
#
#
fi = open(filename)
d = np.array(fi.read().split())
N = len(d)
M = N / 7
d = d.reshape(M, 7)
d = d.astype('float')
f = d[:, 0]
if f[0] == 0:
print("error : frequency cannot be zero")
# detect frequency unit
# if values are above 2000 its means frequency is not expressed
# in GHz
#
if (f[0] > 2000):
f = f / 1.0e9
phi = d[:, 1]
theta = d[:, 2]
#
# type : refers to the way the angular values are stored in the file
# Detection of file type
#
# nfc
# f phi theta
# 2 1 0
# Natural
# f phi theta
# 2 0 1
#
# auto detect storage mode looping
#
dphi = abs(phi[0] - phi[1])
dtheta = abs(theta[0] - theta[1])
if (dphi == 0) & (dtheta != 0):
typ = 'nfc'
if (dtheta == 0) & (dphi != 0):
typ = 'natural'
self.typ = typ
Fphi = d[:, 3] + d[:, 4] * 1j
Ftheta = d[:, 5] + d[:, 6] * 1j
#
# Normalization
#
G = np.real(Fphi * np.conj(Fphi) + Ftheta * np.conj(Ftheta))
SqG = np.sqrt(G)
#Fphi = Fphi/SqG
#Ftheta = Ftheta/SqG
#Fphi = Fphi
#Ftheta = Ftheta
#
# Reshaping
#
if typ == 'natural':
self.Fp = Fphi.reshape((nf, ntheta, nphi))
self.Ft = Ftheta.reshape((nf, ntheta, nphi))
self.sqG = SqG.reshape((nf, ntheta, nphi))
Ttheta = theta.reshape((nf, ntheta, nphi))
Tphi = phi.reshape((nf, ntheta, nphi))
Tf = f.reshape((nf, ntheta, nphi))
if typ == 'nfc':
self.Fp = Fphi.reshape((nf, nphi, ntheta))
self.Ft = Ftheta.reshape((nf, nphi, ntheta))
self.sqG = SqG.reshape((nf, nphi, ntheta))
Ttheta = theta.reshape((nf, nphi, ntheta))
Tphi = phi.reshape((nf, nphi, ntheta))
Tf = f.reshape((nf, nphi, ntheta))
#
# Force natural order (f,theta,phi)
# This is not the order of the satimo nfc which is (f,phi,theta)
#
self.Fp = self.Fp.swapaxes(1, 2)
self.Ft = self.Ft.swapaxes(1, 2)
self.sqG = self.sqG.swapaxes(1, 2)
Ttheta = Ttheta.swapaxes(1, 2)
Tphi = Tphi.swapaxes(1, 2)
Tf = Tf.swapaxes(1, 2)
self.fGHz = Tf[:, 0, 0]
self.theta = Ttheta[0, :, 0]
self.phi = Tphi[0, 0, :]
#
# check header consistency
#
np.testing.assert_almost_equal(self.fGHz[0],fmin,6)
np.testing.assert_almost_equal(self.fGHz[-1],fmax,6)
np.testing.assert_almost_equal(self.theta[0],thmin,3)
np.testing.assert_almost_equal(self.theta[-1],thmax,3)
np.testing.assert_almost_equal(self.phi[0],phmin,3)
np.testing.assert_almost_equal(self.phi[-1],phmax,3)
self.nf = nf
self.nth = ntheta
self.nph = nphi
self.tau = tau
self.evaluated = True
def checkpole(self, kf=0):
""" display the reconstructed field on pole for integrity verification
Parameters
----------
kf : int
frequency index default 0
"""
Ft0 = self.Ft[kf, 0, :]
Fp0 = self.Fp[kf, 0, :]
Ftp = self.Ft[kf, -1, :]
Fpp = self.Fp[kf, -1, :]
phi = self.phi
Ex0 = Ft0 * np.cos(phi) - Fp0 * np.sin(phi)
Ey0 = Ft0 * np.sin(phi) + Fp0 * np.cos(phi)
Exp = Ftp * np.cos(phi) - Fpp * np.sin(phi)
Eyp = Ftp * np.sin(phi) + Fpp * np.cos(phi)
plt.subplot(4, 2, 1)
plt.plot(phi, np.real(Ex0))
plt.subplot(4, 2, 2)
plt.plot(phi, np.imag(Ex0))
plt.subplot(4, 2, 3)
plt.plot(phi, np.real(Ey0))
plt.subplot(4, 2, 4)
plt.plot(phi, np.imag(Ey0))
plt.subplot(4, 2, 5)
plt.plot(phi, np.real(Exp))
plt.subplot(4, 2, 6)
plt.plot(phi, np.imag(Exp))
plt.subplot(4, 2, 7)
plt.plot(phi, np.real(Eyp))
plt.subplot(4, 2, 8)
plt.plot(phi, np.imag(Eyp))
def info(self):
""" gives info about antenna object
"""
print(self._filename)
print("type : ", self.typ)
if self.typ == 'mat':
print(self.DataFile)
print(self.AntennaName)
print(self.Date)
print(self.StartTime)
print(self.Notes)
print(self.Serie)
print(self.Run)
print("Nb theta (lat) :", self.nth)
print("Nb phi (lon) :", self.nph)
if self.typ =='nfc':
print( "--------------------------")
print( "fmin (GHz) :", self.fGHz[0])
print( "fmax (GHz) :", self.fGHz[-1])
print( "Nf :", self.nf)
print( "thmin (rad) :", self.theta[0])
print( "thmax (rad) :", self.theta[-1])
print( "Nth :", self.nth)
print( "phmin (rad) :", self.phi[0])
print( "phmax (rad) :", self.phi[-1])
print( "Nph :", self.nph)
try:
self.C.info()
except:
print("No vsh coefficient calculated yet")
#@mlab.show
def _show3(self,bnewfig = True,
bcolorbar =True,
name=[],
binteract=False,
btitle=True,
bcircle=True,
**kwargs ):
""" show3 mayavi
Parameters
----------
btitle : boolean
display title
bcolorbar : boolean
display colorbar
binteract : boolean
enable interactive mode
newfig: boolean
see also
--------
antprop.antenna._computemesh
"""
if not self.evaluated:
self.eval(pattern=True)
# k is the frequency index
if hasattr(self,'p'):
lpshp = len(self.p.shape)
sum_index = tuple(np.arange(1,lpshp))
po = np.mean(self.p,axis=sum_index)
kwargs['po']=po
x, y, z, k, scalar = self._computemesh(**kwargs)
if bnewfig:
mlab.clf()
f=mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
else :
f=mlab.gcf()
if kwargs.has_key('opacity'):
opacity = kwargs['opacity']
else:
opacity = 1
self._mayamesh = mlab.mesh(x, y, z,
scalars= scalar,
resolution = 1,
opacity = opacity,reset_zoom=False)
if name == []:
f.children[-1].name = 'Antenna ' + self._filename
else :
f.children[-1].name = name + self._filename
if bcolorbar :
mlab.colorbar()
if btitle:
mlab.title(self._filename + ' @ ' + str(self.fGHz[k]) + ' GHz',height=1,size=0.5)
# draw 3D circle around pattern
if bcircle:
xc,yc,zc =circle('xy') # blue
mlab.plot3d(xc,yc,zc,color=(0,0,1))
xc,yc,zc =circle('yz') # red
mlab.plot3d(xc,yc,zc,color=(1,0,0))
xc,yc,zc =circle('xz') # green
mlab.plot3d(xc,yc,zc,color=(0,1,0))
if binteract:
self._outline = mlab.outline(self._mayamesh, color=(.7, .7, .7))
self._outline.visible=False
def picker_callback(picker):
""" Picker callback: this get called when on pick events.
"""
if picker.actor in self._mayamesh.actor.actors:
self._outline.visible = not self._outline.visible
self._is_selected=self._outline.visible
picker = f.on_mouse_pick(picker_callback)
return(f)
def _computemesh(self,**kwargs):
""" compute mesh from theta phi
Parameters
----------
fGHz : np.array()
default [] : takes center frequency fa[len(fa)/2]
po : np.array()
location point of the antenna
T : np.array
rotation matrix
minr : float
minimum radius in meter
maxr : float
maximum radius in meters
tag : string
ilog : boolean
title : boolean
Returns
-------
(x, y, z, k)
x , y , z values in cartesian axis
k frequency point evaluated
"""
defaults = { 'fGHz' :[],
'po': np.array([0,0,0]),
'T' : np.eye(3),
'minr' : 0.1,
'maxr' : 1 ,
'scale':1.,
'tag' : 'Pat',
'txru' : 0,
'ilog' : False,
'title':True,
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
fGHz = kwargs['fGHz']
minr = kwargs['minr']
maxr = kwargs['maxr']
tag = kwargs['tag']
ilog = kwargs['ilog']
txru = kwargs['txru']
scale= kwargs['scale']
po = kwargs['po']
# T is an unitary matrix
T = kwargs['T']
if fGHz == []:
# self.ext == '' <=> mathematically generated => nf = 1
if self.ext != '':
k = len(self.fGHz)/2
else:
k = 0
else :
if self.ext != '':
k = np.where(self.fGHz>=fGHz)[0][0]
else:
k = 0
if len(self.Ft.shape)==3:
r = self.sqG[:,:,k]
else:
r = self.sqG[:,:,txru,k]
th = self.theta[:,None]
phi = self.phi[None,:]
if ilog :
r = 10*np.log10(abs(r))
else:
r = abs(r)
if r.max() != r.min():
u = (r - r.min()) /(r.max() - r.min())
else : u = r
r = minr + (maxr-minr) * u
x = scale*r * np.sin(th) * np.cos(phi)
y = scale*r * np.sin(th) * np.sin(phi)
z = scale*r * np.cos(th)
if z.shape[1] != y.shape[1]:
z = z*np.ones(y.shape[1])
p = np.concatenate((x[...,None],
y[...,None],
z[...,None]),axis=2)
#
# antenna cs -> glogal cs
# q : Nt x Np x 3
q = np.einsum('ij,klj->kli',T,p)
#
# translation
#
scalar=(q[...,0]**2+q[...,1]**2+q[...,2]**2)
q[...,0]=q[...,0]+po[0]
q[...,1]=q[...,1]+po[1]
q[...,2]=q[...,2]+po[2]
x = q[...,0]
y = q[...,1]
z = q[...,2]
return x, y, z, k, scalar
    def show3(self,k=0,po=[],T=[],txru=0,typ='G', mode='linear', silent=False):
        """ show3 geomview

        Parameters
        ----------
        k : int
            frequency index
        po : array
            position of the antenna (default : origin)
        T : array
            GCS of the antenna (default : identity)
        txru : int
            transceiver unit index (used when Ft has 4 axes)
        typ : string
            'G' | 'Ft' | 'Fp' : quantity displayed
        mode : string
            'linear' | 'not implemented'
        silent : boolean
            True | False (if False the geomview window is opened)

        Examples
        --------

            >>> from pylayers.antprop.antenna import *
            >>> import numpy as np
            >>> import matplotlib.pylab as plt
            >>> A = Antenna('defant.sh3')
            >>> #A.show3()

        """
        if not self.evaluated:
            self.eval(pattern=True)

        f = self.fGHz[k]

        # select the displayed quantity
        # 3 axis : nth x nph x nf
        if len(self.Ft.shape)==3:
            if typ == 'G':
                V = self.sqG[:, :,k]
            if typ == 'Ft':
                V = self.Ft[:, :,k]
            if typ == 'Fp':
                V = self.Fp[:, :,k]
            if typ == 'Ft':
                V = self.Ft[:,:,k]

        # 4 axis : nth x nph x ntxru x nf
        if len(self.Ft.shape)==4:
            if typ == 'G':
                V = self.sqG[:, :, txru,k]
            if typ == 'Ft':
                V = self.Ft[:, : ,txru,k]
            if typ == 'Fp':
                V = self.Fp[:, :,txru,k]

        # default placement : origin, identity orientation
        if po ==[]:
            po = np.array([0, 0, 0])
        if T ==[]:
            T = np.eye(3)

        _filename = 'antbody'

        geo = geu.Geomoff(_filename)
        # geo.pattern requires the following shapes
        # theta (Ntx1)
        # phi (1xNp)
        #if len(np.shape(self.theta))==1:
        #    theta = self.theta[:,None]
        #else:
        #    theta=self.theta
        theta = self.theta
        #if len(np.shape(self.phi))==1:
        #    phi = self.phi[None,:]
        #else:
        #    phi=self.phi
        phi = self.phi
        # write the geomview file describing the pattern
        geo.pattern(theta,phi,V,po=po,T=T,ilog=False,minr=0.01,maxr=0.2)
        #filename = geom_pattern(self.theta, self.phi, V, k, po, minr, maxr, typ)
        #filename = geom_pattern(self.theta, self.phi, V, k, po, minr, maxr, typ)

        if not silent:
            geo.show3()
def plot3d(self, k=0, typ='Gain', col=True):
""" show 3D pattern in matplotlib
Parameters
----------
k : frequency index
typ = 'Gain'
= 'Ftheta'
= 'Fphi'
if col -> color coded plot3D
else -> simple plot3D
"""
fig = plt.figure()
ax = axes3d.Axes3D(fig)
if typ == 'Gain':
V = self.sqG[:, :,k]
if typ == 'Ftheta':
V = self.Ft[ :, :,k]
if typ == 'Fphi':
V = self.Fp[ :, :,k]
vt = np.ones(self.nth)
vp = np.ones(self.nph)
Th = np.outer(self.theta, vp)
Ph = np.outer(vt, self.phi)
pdb.set_trace()
X = abs(V) * np.cos(Ph) * np.sin(Th)
Y = abs(V) * np.sin(Ph) * np.sin(Th)
Z = abs(V) * np.cos(Th)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if col:
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=cm.hot_r,shade=True)
else:
ax.plot3D(np.ravel(X), np.ravel(Y), np.ravel(Z))
plt.show()
    def pol3d(self, k=0, R=50, St=4, Sp=4, silent=False):
        """ Display polarisation diagram  in 3D

        Parameters
        ----------
        k : int
            frequency index
        R : float
            radius of the sphere
        St : int
            downsampling factor along theta
        Sp : int
            downsampling factor along phi
        silent : Boolean
            (if True the file is created and not displayed')

        Notes
        -----
        The file created is named : Polar{ifreq}.list
        it is placed in the /geom directory of the project.
        Each sampled direction gets a polarization ellipse drawn on a
        sphere of radius R.
        """
        _filename = 'Polar' + str(10000 + k)[1:] + '.list'
        filename = pyu.getlong(_filename, pstruc['DIRGEOM'])
        fd = open(filename, "w")
        fd.write("LIST\n")
        Nt = self.nth
        Np = self.nph
        # N : number of points used to draw each ellipse
        N = 10
        # downsampled angular indices
        plth = np.arange(0, Nt, St)
        plph = np.arange(0, Np, Sp)
        for m in plph:
            for n in plth:
                #theta = self.theta[n,0]
                theta = self.theta[n]
                #print "m,theta= :",m,theta*180/np.pi
                #phi = self.phi[0,m]
                phi = self.phi[m]
                #print "n,phi=:",n,phi*180/np.pi
                # local spherical basis at (theta,phi)
                B = geu.vec_sph(theta, phi)
                # point on the sphere of radius R
                p = R * np.array((np.cos(phi) * np.sin(theta),
                                  np.sin(phi) * np.sin(theta),
                                  np.cos(theta)))
                fd.write('{\n')
                geu.ellipse(fd, p, B[0, :], B[1, :], self.Ft[n, m , k], self.Fp[n, m , k], N)
                fd.write('}\n')
        fd.close()
        if not silent:
            chaine = "geomview " + filename + " 2>/dev/null &"
            os.system(chaine)
def mse(self, Fth, Fph, N=0):
""" mean square error between original and reconstructed
Parameters
----------
Fth : np.array
Fph : np.array
N : int
Notes
-----
Calculate the relative mean square error between original pattern A.Ftheta , A.Fphi and the
pattern given as argument of the function Fth , Fph
The mse is evaluated on both polarization and normalized over the energy of each
original pattern.
The function returns the maximum between those two errors
N is a parameter which allows to suppress value at the pole for the calculation of the error
if N=0 all values are kept else N < n < Nt - N
"""
sh = np.shape(self.Ft)
Nf = sh[0]
Nt = sh[1]
Np = sh[2]
# plage de theta (exclusion du pole)
pt = np.arange(N, Nt - N, 1)
Fthr = Fth.reshape(sh)
Fphr = Fph.reshape(sh)
Gr = np.real(Fphr * np.conj(Fphr) + Fthr * np.conj(Fthr))
SqGr = np.sqrt(Gr)
Fthr = Fthr[:, pt, :].ravel()
Fphr = Fphr[:, pt, :].ravel()
SqGr = SqGr[:, pt, :].ravel()
Ftho = self.Ft[:, pt, :].ravel()
Fpho = self.Fp[:, pt, :].ravel()
SqGo = self.sqG[:, pt, :].ravel()
Etho = np.sqrt(np.dot(np.conj(Ftho), Ftho))
Epho = np.sqrt(np.dot(np.conj(Fpho), Fpho))
Eo = np.sqrt(np.dot(np.conj(Ftho), Ftho) + np.dot(np.conj(Fpho), Fpho))
errth = Ftho - Fthr
errph = Fpho - Fphr
Err = np.real(np.sqrt(np.dot(np.conj(errth), errth) + np.dot(np.conj(errph), errph)))
Errth = np.real(np.sqrt(np.dot(np.conj(errth), errth)))
Errph = np.real(np.sqrt(np.dot(np.conj(errph), errph)))
#Errth_rel = Errth/Etho
#Errph_rel = Errph/Epho
Errth_rel = Errth / Eo
Errph_rel = Errph / Eo
Err_rel = Err / Eo
return Err_rel, Errth_rel, Errph_rel
    def getdelay(self,delayCandidates = np.arange(-10,10,0.001)):
        """ get electrical delay

        Parameters
        ----------
        delayCandidates : ndarray
            candidate delays in (ns), default np.arange(-10,10,0.001)

        Returns
        -------
        electricalDelay  : float
            the candidate delay maximizing the coherent sum of
            F_theta over frequency in the strongest direction

        Raises
        ------
        Warning
            if the antenna pattern has not been evaluated yet

        Author : Troels Pedersen (Aalborg University)
                 B.Uguen
        """
        if self.evaluated:
            # direction (theta,phi indices) of maximum |F_theta|
            maxPowerInd  = np.unravel_index(np.argmax(abs(self.Ft)),np.shape(self.Ft))
            # pick the delay whose phase ramp best compensates the
            # frequency response in that direction
            elD  = delayCandidates[np.argmax(abs(
                np.dot(self.Ft[maxPowerInd[0],maxPowerInd[1],:]
                       ,np.exp(2j*np.pi*self.fGHz[:,None]
                               *delayCandidates[None,:]))))]
            #electricalDelay  = delayCandidates[np.argmax(abs(
            #    np.dot(self.Ft[:,maxPowerInd[1],maxPowerInd[2]]
            #        ,np.exp(2j*np.pi*freq.reshape(len(freq),1)
            #          *delayCandidates.reshape(1,len(delayCandidates))))
            #        ))]
            return(elD)
        else:
            raise Warning('Antenna has not been evaluated')
    def elec_delay(self,tau):
        r""" apply an electrical delay

        Parameters
        ----------
        tau : float
            electrical delay in nanoseconds

        Notes
        -----
        This function applies an electrical delay math::`\exp{+2 j \pi f \tau)`
        on the phase of diagram math::``F_{\theta}`` and math::`F_{\phi}`.
        The cumulative applied delay is tracked in self.tau.

        Raises
        ------
        Warning
            if the antenna pattern has not been evaluated yet

        Examples
        --------
        .. plot::
            :include-source:

            >>> from pylayers.antprop.antenna import *
            >>> A = Antenna('S2R2.sh3')
            >>> A.eval()
            >>> tau = A.getdelay()
            >>> A.elec_delay(tau)

        """

        self.tau = self.tau+tau
        if self.evaluated:
            Ftheta = self.Ft
            Fphi = self.Fp
            sh = np.shape(Ftheta)
            # phase ramp over the frequency axis (last axis of Ft/Fp)
            e = np.exp(2 * np.pi * 1j * self.fGHz[None,None,:]* tau)
            #E = np.outer(e, ones(sh[1] * sh[2]))
            #Fth = Ftheta.reshape(sh[0], sh[1] * sh[2])
            #EFth = Fth * E
            #self.Ft = EFth.reshape(sh[0], sh[1], sh[2])
            self.Ft = self.Ft*e
            self.Fp = self.Fp*e
            #Fph = Fphi.reshape(sh[0], sh[1] * sh[2])
            #EFph = Fph * E
            #self.Fp = EFph.reshape(sh[0], sh[1], sh[2])
        else:
            raise Warning('antenna has not been evaluated')
    def Fsynth(self,theta=[],phi=[],):
        """ Perform Antenna synthesis

        Parameters
        ----------
        theta : np.array
        phi :   np.array
            call Antenna.Fpatt or Antenna.Fsynth3

        Returns
        -------
        Ft, Fp : synthesized (or analytically evaluated) patterns

        Notes
        -----
        The antenna pattern synthesis is done either from spherical
        harmonics coefficients or from an analytical expression of the
        radiation pattern.
        """
        # file-based or SH-coefficient antennas -> spherical harmonics
        # synthesis ; otherwise fall through to the analytical pattern
        if ((self.fromfile) or (self.typ=='vsh') or (self.typ=='ssh')):
            Ft,Fp = self.Fsynth3(theta,phi)
            self.gain()
            self.evaluated=True
        else :
            Ft = self.Ft
            Fp = self.Fp
            self.theta = theta
            self.phi = phi
            # dynamic dispatch to the analytical pattern method
            # (e.g. typ='Omni' -> self.pOmni())
            eval('self.p'+self.typ)()
        #Ft,Fp = self.Fpatt(theta,phi,pattern)
        return (Ft,Fp)
#def Fsynth1(self, theta, phi, k=0):
def Fsynth1(self, theta, phi):
""" calculate complex antenna pattern from VSH Coefficients (shape 1)
Parameters
----------
theta : ndarray (1xNdir)
phi : ndarray (1xNdir)
k : int
frequency index
Returns
-------
Ft , Fp
"""
Nt = len(theta)
Np = len(phi)
if self.grid:
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt),phi)
nray = len(theta)
#Br = self.C.Br.s1[k, :, :]
#Bi = self.C.Bi.s1[k, :, :]
#Cr = self.C.Cr.s1[k, :, :]
#Ci = self.C.Ci.s1[k, :, :]
Br = self.C.Br.s1[:, :, :]
Bi = self.C.Bi.s1[:, :, :]
Cr = self.C.Cr.s1[:, :, :]
Ci = self.C.Ci.s1[:, :, :]
N = self.C.Br.N1
M = self.C.Br.M1
#print "N,M",N,M
#
# The - sign is necessary to get the good reconstruction
# deduced from observation
# May be it comes from a different definition of theta in SPHEREPACK
x = -np.cos(theta)
Pmm1n, Pmp1n = AFLegendre3(N, M, x)
ind = index_vsh(N, M)
n = ind[:, 0]
m = ind[:, 1]
#~ V, W = VW(n, m, x, phi, Pmm1n, Pmp1n)
V, W = VW(n, m, x, phi)
#
# broadcasting along frequency axis
#
V = np.expand_dims(V,0)
W = np.expand_dims(V,0)
#
# k : frequency axis
# l : coeff l
# m
Fth = np.eisum('klm,kilm->ki',Br,np.real(V.T)) - \
np.eisum('klm,kilm->ki',Bi,np.imag(V.T)) + \
np.eisum('klm,kilm->ki',Ci,np.real(W.T)) + \
np.eisum('klm,kilm->ki',Cr,np.imag(W.T))
Fph = -np.eisum('klm,kilm->ki',Cr,np.real(V.T)) + \
np.eisum('klm,kilm->ki',Ci,np.imag(V.T)) + \
np.eisum('klm,kilm->ki',Bi,np.real(W.T)) + \
np.eisum('klm,kilm->ki',Br,np.imag(W.T))
#Fth = np.dot(Br, np.real(V.T)) - \
# np.dot(Bi, np.imag(V.T)) + \
# np.dot(Ci, np.real(W.T)) + \
# np.dot(Cr, np.imag(W.T))
#Fph = -np.dot(Cr, np.real(V.T)) + \
# np.dot(Ci, np.imag(V.T)) + \
# np.dot(Bi, np.real(W.T)) + \
# np.dot(Br, np.imag(W.T))
if self.grid:
Nf = len(self.fGHz)
Fth = Fth.reshape(Nf, Nt, Np)
Fph = Fph.reshape(Nf, Nt, Np)
return Fth, Fph
    def Fsynth2s(self,dsf=1):
        """ pattern synthesis from shape 2 vsh coefficients

        Parameters
        ----------
        dsf : int
            down sampling factor applied to the stored theta/phi grids

        Returns
        -------
        (tEBr, tEBi, tECr, tECi) : tuple of np.array
            per-coefficient energies (one value per K2 coefficient)
            for each of the four coefficient families

        Notes
        -----
        Calculate complex antenna pattern from VSH Coefficients (shape 2)
        for the specified directions (theta,phi)
        theta and phi arrays needs to have the same size
        """
        theta = self.theta[::dsf]
        phi = self.phi[::dsf]
        Nt = len(theta)
        Np = len(phi)
        # full direction grid (theta varies slowest)
        theta = np.kron(theta, np.ones(Np))
        phi = np.kron(np.ones(Nt), phi)

        Ndir = len(theta)

        Br = self.C.Br.s2 # Nf x K2
        Bi = self.C.Bi.s2 # Nf x K2
        Cr = self.C.Cr.s2 # Nf x K2
        Ci = self.C.Ci.s2 # Nf x K2

        Nf = np.shape(self.C.Br.s2)[0]
        K2 = np.shape(self.C.Br.s2)[1]

        L = self.C.Br.N2 # int
        M = self.C.Br.M2 # int

        #print "N,M",N,M
        #
        # The - sign is necessary to get the good reconstruction
        # deduced from observation
        # May be it comes from a different definition of theta in SPHEREPACK
        x = -np.cos(theta)

        Pmm1n, Pmp1n = AFLegendre3(L, M, x)
        ind = index_vsh(L, M)

        l = ind[:, 0]
        m = ind[:, 1]

        V, W = VW2(l, m, x, phi, Pmm1n, Pmp1n)  # K2 x Ndir

        # Fth , Fph are Nf x Ndir
        tEBr = []
        tEBi = []
        tECr = []
        tECi = []

        # per-coefficient energy: rank-1 outer product of the coefficient
        # (over frequency) with its basis function (over directions),
        # integrated with the sin(theta) Jacobian
        for k in range(K2):
            BrVr = np.dot(Br[:,k].reshape(Nf,1),
                          np.real(V.T)[k,:].reshape(1,Ndir))
            BiVi = np.dot(Bi[:,k].reshape(Nf,1),
                          np.imag(V.T)[k,:].reshape(1,Ndir))
            CiWr = np.dot(Ci[:,k].reshape(Nf,1),
                          np.real(W.T)[k,:].reshape(1,Ndir))
            CrWi = np.dot(Cr[:,k].reshape(Nf,1),
                          np.imag(W.T)[k,:].reshape(1,Ndir))

            CrVr = np.dot(Cr[:,k].reshape(Nf,1),
                          np.real(V.T)[k,:].reshape(1,Ndir))
            CiVi = np.dot(Ci[:,k].reshape(Nf,1),
                          np.imag(V.T)[k,:].reshape(1,Ndir))
            BiWr = np.dot(Bi[:,k].reshape(Nf,1),
                          np.real(W.T)[k,:].reshape(1,Ndir))
            BrWi = np.dot(Br[:,k].reshape(Nf,1),
                          np.imag(W.T)[k,:].reshape(1,Ndir))

            EBr = np.sum(BrVr*np.conj(BrVr)*np.sin(theta)) + \
                  np.sum(BrWi*np.conj(BrWi)*np.sin(theta))

            EBi = np.sum(BiVi*np.conj(BiVi)*np.sin(theta)) + \
                  np.sum(BiWr*np.conj(BiWr)*np.sin(theta))

            ECr = np.sum(CrWi*np.conj(CrWi)*np.sin(theta)) + \
                + np.sum(CrVr*np.conj(CrVr)*np.sin(theta))

            ECi = np.sum(CiWr*np.conj(CiWr)*np.sin(theta)) + \
                + np.sum(CiVi*np.conj(CiVi)*np.sin(theta))

            tEBr.append(EBr)
            tEBi.append(EBi)
            tECr.append(ECr)
            tECi.append(ECi)

        #Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
        #      np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))
        #Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
        #      np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))

        return np.array(tEBr),np.array(tEBi),np.array(tECr),np.array(tECi)
    def Fsynth2b(self, theta, phi):
        """  pattern synthesis from shape 2 vsh coefficients

        Parameters
        ----------
        theta : 1 x Nt
        phi : 1 x Np

        Returns
        -------
        Fth, Fph
            if self.grid : reshaped as (Nf, Nt, Np), else (Nf, Ndir)

        Notes
        -----
        Calculate complex antenna pattern from VSH Coefficients (shape 2)
        for the specified directions (theta,phi)
        theta and phi arrays needs to have the same size
        """

        Nt = len(theta)
        Np = len(phi)

        if self.grid:
            # build the full (theta x phi) direction grid
            theta = np.kron(theta, np.ones(Np))
            phi = np.kron(np.ones(Nt),phi)

        Br = self.C.Br.s2 # Nf x K2
        Bi = self.C.Bi.s2 # Nf x K2
        Cr = self.C.Cr.s2 # Nf x K2
        Ci = self.C.Ci.s2 # Nf x K2

        L = self.C.Br.N2 # int
        M = self.C.Br.M2 # int

        #print "N,M",N,M
        #
        # The - sign is necessary to get the good reconstruction
        # deduced from observation
        # May be it comes from a different definition of theta in SPHEREPACK
        x = -np.cos(theta)

        Pmm1n, Pmp1n = AFLegendre3(L, M, x)
        ind = index_vsh(L, M)

        l = ind[:, 0]
        m = ind[:, 1]

        V, W = VW2(l, m, x, phi, Pmm1n, Pmp1n)  # K2 x Ndir

        # Fth , Fph are Nf x Ndir
        Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
              np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))

        Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
               np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))

        if self.grid:
            Nf = len(self.fGHz)
            Fth = Fth.reshape(Nf, Nt, Np)
            Fph = Fph.reshape(Nf, Nt, Np)

        return Fth, Fph
    def Fsynth2(self, theta, phi, typ = 'vsh'):
        """  pattern synthesis from shape 2 vsh coeff

        Parameters
        ----------
        theta : array 1 x Nt
        phi : array 1 x Np
        typ  : string
            {vsh | ssh}

        Returns
        -------
        Fth, Fph

        Notes
        -----
        Calculate complex antenna pattern from VSH Coefficients (shape 2)
        for the specified directions (theta,phi)
        theta and phi arrays needs to have the same size

        side effect : updates self.nth, self.nph, self.nf and, in the
        ssh branch, sets self.evaluated = True

        NOTE(review): in the ssh branch, theta/phi are not expanded with
        kron even when self.grid is True while the reshape assumes
        Ndir == nth*nph -- confirm grid handling for typ='ssh'
        """

        self.nth = len(theta)
        self.nph = len(phi)
        self.nf = len(self.fGHz)

        if typ =='vsh' :

            if self.grid:
                # build the full (theta x phi) direction grid
                theta = np.kron(theta, np.ones(self.nph))
                phi = np.kron(np.ones(self.nth),phi)

            Br  = self.C.Br.s2
            Bi  = self.C.Bi.s2
            Cr  = self.C.Cr.s2
            Ci  = self.C.Ci.s2

            N = self.C.Br.N2
            M = self.C.Br.M2

            #print "N,M",N,M
            #
            # The - sign is necessary to get the good reconstruction
            # deduced from observation
            # May be it comes from a different definition of theta in SPHEREPACK
            x = -np.cos(theta)

            Pmm1n, Pmp1n = AFLegendre3(N, M, x)
            ind = index_vsh(N, M)

            n = ind[:, 0]
            m = ind[:, 1]

            #~ V, W = VW(n, m, x, phi, Pmm1n, Pmp1n)
            V, W = VW(n, m, x, phi)


            Fth = np.dot(Br, np.real(V.T)) - np.dot(Bi, np.imag(V.T)) + \
                np.dot(Ci, np.real(W.T)) + np.dot(Cr, np.imag(W.T))
            Fph = -np.dot(Cr, np.real(V.T)) + np.dot(Ci, np.imag(V.T)) + \
                np.dot(Bi, np.real(W.T)) + np.dot(Br, np.imag(W.T))

            if self.grid:
                Fth = Fth.reshape(self.nf, self.nth, self.nph)
                Fph = Fph.reshape(self.nf, self.nth, self.nph)

        if typ=='ssh':
            cx = self.S.Cx.s2
            cy = self.S.Cy.s2
            cz = self.S.Cz.s2
            lmax = self.S.Cx.lmax
            # scalar spherical harmonics basis on the requested directions
            Y ,indx = SSHFunc(lmax, theta,phi)
            # cartesian field components, then conversion to spherical
            Ex = np.dot(cx,Y).reshape(self.nf,self.nth,self.nph)
            Ey = np.dot(cy,Y).reshape(self.nf,self.nth,self.nph)
            Ez = np.dot(cz,Y).reshape(self.nf,self.nth,self.nph)
            Fth,Fph = CartToSphere (theta, phi, Ex, Ey,Ez, bfreq = True )

            self.evaluated = True

        return Fth, Fph
def Fsynth3(self,theta=[],phi=[],typ='vsh'):
r""" synthesis of a complex antenna pattern from SH coefficients
(vsh or ssh in shape 3)
Ndir is the number of directions
Parameters
----------
theta : ndarray (1xNdir if not pattern) (1xNtheta if pattern)
phi : ndarray (1xNdir if not pattter) (1xNphi if pattern)
pattern : boolean
if True theta and phi are reorganized for building the pattern
typ : 'vsh' | 'ssh' | 'hfss'
Returns
-------
if self.grid:
Fth : ndarray (Ntheta x Nphi)
Fph : ndarray (Ntheta x Nphi)
else:
Fth : ndarray (1 x Ndir)
Fph : ndarray (1 x Ndir)
See Also
--------
pylayers.antprop.channel._vec2scalA
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.antenna import *
>>> import numpy as np
>>> import matplotlib.pylab as plt
>>> A = Antenna('defant.vsh3')
>>> F = A.eval(grid=True)
All Br,Cr,Bi,Ci have the same (l,m) index in order to evaluate only
once the V,W function
If the data comes from a cst file like the antenna used in WHERE1 D4.1
the pattern is multiplied by $\frac{4\pi}{120\pi}=\frac{1}{\sqrt{30}$
"""
#typ = self.typ
#self._filename.split('.')[1]
#if typ=='satimo':
# coeff=1.
#if typ=='cst':
# coeff=1./sqrt(30)
#assert typ in ['ssh','vsh','hfss'],
assert (hasattr(self,'C') or hasattr(self,'S')),"No SH coeffs evaluated"
Nf = len(self.fGHz)
if theta==[]:
theta=np.linspace(0,np.pi,45)
if phi == []:
phi= np.linspace(0,2*np.pi,90)
Nt = len(theta)
Np = len(phi)
self.nth = len(theta)
self.nph = len(phi)
if self.grid:
#self.theta = theta[:,None]
#self.phi = phi[None,:]
self.theta = theta
self.phi = phi
theta = np.kron(theta, np.ones(Np))
phi = np.kron(np.ones(Nt),phi)
if typ =='vsh':
nray = len(theta)
Br = self.C.Br.s3
lBr = self.C.Br.ind3[:, 0]
mBr = self.C.Br.ind3[:, 1]
Bi = self.C.Bi.s3
Cr = self.C.Cr.s3
Ci = self.C.Ci.s3
L = lBr.max()
M = mBr.max()
# vector spherical harmonics basis functions
V, W = VW(lBr, mBr, theta, phi)
Fth = np.dot(Br, np.real(V.T)) - \
np.dot(Bi, np.imag(V.T)) + \
np.dot(Ci, np.real(W.T)) + \
np.dot(Cr, np.imag(W.T))
Fph = -np.dot(Cr, np.real(V.T)) + \
np.dot(Ci, np.imag(V.T)) + \
np.dot(Bi, np.real(W.T)) + \
np.dot(Br, np.imag(W.T))
if self.grid:
Fth = Fth.reshape(Nf, Nt, Np)
Fph = Fph.reshape(Nf, Nt, Np)
if typ == 'ssh':
cx = self.S.Cx.s3
cy = self.S.Cy.s3
cz = self.S.Cz.s3
lmax = self.S.Cx.lmax
Y ,indx = SSHFunc2(lmax, theta,phi)
#k = self.S.Cx.k2[:,0]
# same k for x y and z
k = self.S.Cx.k2
if pattern :
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere(theta, phi, Ex, Ey,Ez, bfreq = True, pattern = True )
Fth = Fth.reshape(Nf,Nt,Np)
Fph = Fph.reshape(Nf,Nt,Np)
else:
Ex = np.dot(cx,Y[k])
Ey = np.dot(cy,Y[k])
Ez = np.dot(cz,Y[k])
Fth,Fph = CartToSphere (theta, phi, Ex, Ey,Ez, bfreq = True, pattern = False)
#self.Fp = Fph
#self.Ft = Fth
#G = np.real(Fph * np.conj(Fph) + Fth * np.conj(Fth))
#self.sqG = np.sqrt(G)
#if self.grid:
# self.Fp = Fph
# self.Ft = Fth
# G = np.real(Fph * np.conj(Fph) + Fth * np.conj(Fth))
# self.sqG = np.sqrt(G)
self.evaluated = True
#if typ == 'hfss':
# scipy.interpolate.griddata()
# Fth = self.Ft
# Fph = self.Fp
# TODO create 2 different functions for pattern and not pattern
#if not self.grid:
return Fth, Fph
#else:
# return None,None
    def movie_vsh(self, mode='linear'):
        """ animates vector spherical coeff w.r.t frequency

        Renders one png frame per frequency point (4 panels: |Br|, |Bi|,
        |Cr|, |Ci| over the first 20x20 (n, m) indices) and assembles them
        into ``vshcoeff.avi`` with the external ``mencoder`` tool.

        Parameters
        ----------
        mode : string
            'linear' |
            NOTE(review): currently unused -- verify intended behavior
        """
        # global color limits: one (min, max) per coefficient family so
        # that every frame of the animation shares the same color mapping
        Brmin = abs(self.C.Br[:, 0:20, 0:20]).min()
        Brmax = abs(self.C.Br[:, 0:20, 0:20]).max()
        Bimin = abs(self.C.Bi[:, 0:20, 0:20]).min()
        Bimax = abs(self.C.Bi[:, 0:20, 0:20]).max()
        Crmin = abs(self.C.Cr[:, 0:20, 0:20]).min()
        Crmax = abs(self.C.Cr[:, 0:20, 0:20]).max()
        Cimin = abs(self.C.Ci[:, 0:20, 0:20]).min()
        Cimax = abs(self.C.Ci[:, 0:20, 0:20]).max()

        # print(Brmin, Brmax, Bimin, Bimax, Crmin, Crmax, Cimin, Cimax)

        # NOTE(review): subplot/pcolor/ylabel/title/colorbar/savefig/clf are
        # used unqualified below, presumably from a pylab star import at
        # module level -- confirm they are in scope
        for k in range(self.nf):
            plt.figure()
            stf = ' f=' + str(self.fGHz[k]) + ' GHz'
            subplot(221)
            pcolor(abs(self.C.Br.s1[k, 0:20, 0:20]),
                   vmin=Brmin, vmax=Brmax, edgecolors='k')
            #xlabel('m',fontsize=12)
            ylabel('n', fontsize=12)
            title('$|Br_{n}^{(m)}|$' + stf, fontsize=10)
            colorbar()
            subplot(222)
            pcolor(abs(self.C.Bi.s1[k, 0:20, 0:20]),
                   vmin=Bimin, vmax=Bimax, edgecolors='k')
            #xlabel('m',fontsize=12)
            ylabel('n', fontsize=12)
            title('$|Bi_{n}^{(m)}|$' + stf, fontsize=10)
            colorbar()
            subplot(223)
            pcolor(abs(self.C.Cr.s1[k, 0:20, 0:20]),
                   vmin=Crmin, vmax=Crmax, edgecolors='k')
            xlabel('m', fontsize=12)
            #ylabel('n',fontsize=12)
            title('$|Cr_{n}^{(m)}|$' + stf, fontsize=10)
            colorbar()
            subplot(224)
            pcolor(abs(self.C.Ci.s1[k, 0:20, 0:20]),
                   vmin=Cimin, vmax=Cimax, edgecolors='k')
            xlabel('m', fontsize=12)
            #ylabel('n',fontsize=12)
            title('$|Ci_{n}^{(m)}|$' + stf, fontsize=10)
            colorbar()
            # zero-padded frame name so mencoder picks frames up in order
            filename = str('%03d' % k) + '.png'
            savefig(filename, dpi=100)
            clf()

        # assemble the frames into an avi movie (requires mencoder in PATH)
        command = ('mencoder',
                   'mf://*.png',
                   '-mf',
                   'type=png:w=800:h=600:fps=1',
                   '-ovc',
                   'lavc',
                   '-lavcopts',
                   'vcodec=mpeg4',
                   '-oac',
                   'copy',
                   '-o',
                   'vshcoeff.avi')
        subprocess.check_call(command)
    def minsh3(self, emax=0.05):
        """ creates vsh3 with significant coeff until given relative reconstruction error

        Parameters
        ----------
        emax : float
            maximum tolerated relative reconstruction error (default 0.05)

        Summary
        -------

        Create antenna's vsh3 file which only contains
        the significant vsh coefficients in shape 3,
        in order to obtain a reconstruction maximal error =  emax

        This function requires a reading of .trx file before being executed

        """
        #th = np.kron(self.theta, np.ones(self.nph))
        #ph = np.kron(np.ones(self.nth), self.phi)
        if not self.grid:
            self.grid = True

        # reference reconstruction error with the full coefficient set
        Fth3, Fph3 = self.Fsynth3(self.theta, self.phi)
        Err = self.mse(Fth3, Fph3, 0)
        # coefficient energies sorted by ens3(): weakest candidates first
        Enc = self.C.ens3()
        n = len(Enc)
        pos = 0
        # greedily drop the weakest coefficient (drag3 mutates self.C) while
        # the error stays below emax ; if a drop pushes the error over the
        # threshold the coefficient is restored with put3
        while (pos < n) & (Err[0] < emax):
            Emin = Enc[pos]
            d = self.C.drag3(Emin)
            Fth3, Fph3 = self.Fsynth3(self.theta, self.phi)
            Err = self.mse(Fth3, Fph3, 0)

            if Err[0] >= emax:
                i = d[0][0]
                i3 = d[1][0]
                self.C.put3(i, i3)
                Fth3, Fph3 = self.Fsynth3(self.theta,self.phi)
                Err = self.mse(Fth3, Fph3, 0)

            pos = pos + 1
def savevsh3(self):
""" save antenna in vsh3 format
Create a .vsh3 antenna file
"""
# create vsh3 file
_filevsh3 = os.path.splitext(self._filename)[0]+'.vsh3'
filevsh3 = pyu.getlong(_filevsh3, pstruc['DIRANT'])
#filevsh3 = pyu.getlong(self._filename,'ant')
if os.path.isfile(filevsh3):
print( filevsh3, ' already exist')
else:
print( 'create ', filevsh3, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Br.ind'] = self.C.Br.ind3
coeff['Bi.ind'] = self.C.Bi.ind3
coeff['Cr.ind'] = self.C.Cr.ind3
coeff['Ci.ind'] = self.C.Ci.ind3
coeff['Br.k'] = self.C.Br.k2
coeff['Bi.k'] = self.C.Bi.k2
coeff['Cr.k'] = self.C.Cr.k2
coeff['Ci.k'] = self.C.Ci.k2
coeff['Br.s3'] = self.C.Br.s3
coeff['Bi.s3'] = self.C.Bi.s3
coeff['Cr.s3'] = self.C.Cr.s3
coeff['Ci.s3'] = self.C.Ci.s3
io.savemat(filevsh3, coeff, appendmat=False)
def savesh2(self):
""" save coeff in .sh2 antenna file
"""
# create sh2 file
#typ = self._filename.split('.')[1]
#self.typ = typ
_filesh2 = self._filename.replace('.'+ self.typ, '.sh2')
filesh2 = pyu.getlong(_filesh2, pstruc['DIRANT'])
if os.path.isfile(filesh2):
print(filesh2, ' already exist')
else:
print('create ', filesh2, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Cx.ind'] = self.S.Cx.ind2
coeff['Cy.ind'] = self.S.Cy.ind2
coeff['Cz.ind'] = self.S.Cz.ind2
coeff['Cx.lmax']= self.S.Cx.lmax
coeff['Cy.lmax']= self.S.Cy.lmax
coeff['Cz.lmax']= self.S.Cz.lmax
coeff['Cx.s2'] = self.S.Cx.s2
coeff['Cy.s2'] = self.S.Cy.s2
coeff['Cz.s2'] = self.S.Cz.s2
io.savemat(filesh2, coeff, appendmat=False)
def savesh3(self):
""" save antenna in sh3 format
create a .sh3 antenna file
"""
# create sh3 file
# if self._filename has an extension
# it is replace by .sh3
#typ = self._filename.split('.')[1]
#self.typ = typ
_filesh3 = self._filename.replace('.'+ self.typ, '.sh3')
filesh3 = pyu.getlong(_filesh3, pstruc['DIRANT'])
if os.path.isfile(filesh3):
print(filesh3, ' already exist')
else:
print('create ', filesh3, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Cx.ind'] = self.S.Cx.ind3
coeff['Cy.ind'] = self.S.Cy.ind3
coeff['Cz.ind'] = self.S.Cz.ind3
coeff['Cx.k'] = self.S.Cx.k2
coeff['Cy.k'] = self.S.Cy.k2
coeff['Cz.k'] = self.S.Cz.k2
coeff['Cx.lmax']= self.S.Cx.lmax
coeff['Cy.lmax']= self.S.Cy.lmax
coeff['Cz.lmax']= self.S.Cz.lmax
coeff['Cx.s3'] = self.S.Cx.s3
coeff['Cy.s3'] = self.S.Cy.s3
coeff['Cz.s3'] = self.S.Cz.s3
io.savemat(filesh3, coeff, appendmat=False)
    def loadvsh3(self):
        """ Load antenna's vsh3 file

        vsh3 file contains a thresholded version of vsh coefficients in shape 3.
        Rebuilds self.C (VSHCoeff) and the frequency axis self.fGHz from the
        stored fmin/fmax bounds. Marks the pattern as not evaluated.
        """
        _filevsh3 = self._filename
        filevsh3 = pyu.getlong(_filevsh3, pstruc['DIRANT'])
        self.evaluated = False

        if os.path.isfile(filevsh3):
            coeff = io.loadmat(filevsh3, appendmat=False)
            #
            # This test is to fix a problem with 2 different
            # behavior of io.loadmat : scalars may come back either as plain
            # floats or wrapped in a 2-D array
            #
            if type(coeff['fmin']) == float:
                fmin = coeff['fmin']
                fmax = coeff['fmax']
            else:
                fmin = coeff['fmin'][0][0]
                fmax = coeff['fmax'][0][0]
            # .. Warning
            # Warning modification takes only one dimension for k
            # if the .vsh3 format evolve it may not work anymore
            #
            Br = VCoeff('s3', fmin, fmax, coeff['Br.s3'],
                         coeff['Br.ind'], coeff['Br.k'][0])
            Bi = VCoeff('s3', fmin, fmax, coeff['Bi.s3'],
                         coeff['Bi.ind'], coeff['Bi.k'][0])
            Cr = VCoeff('s3', fmin, fmax, coeff['Cr.s3'],
                         coeff['Cr.ind'], coeff['Cr.k'][0])
            Ci = VCoeff('s3', fmin, fmax, coeff['Ci.s3'],
                         coeff['Ci.ind'], coeff['Ci.k'][0])
            self.C = VSHCoeff(Br, Bi, Cr, Ci)
            # frequency axis reconstructed from the stored bounds
            self.nf = np.shape(Br.s3)[0]
            self.fGHz = np.linspace(fmin, fmax, self.nf)
        else:
            print(_filevsh3, ' does not exist')
    def loadsh3(self):
        """ Load antenna's sh3 file

        sh3 file contains a thresholded version of ssh coefficients in shape 3.
        Rebuilds (or updates) self.S (SSHCoeff) and the frequency axis
        self.fGHz. Marks the pattern as not evaluated.
        """
        _filesh3 = self._filename.split('.')[0]+'.sh3'
        filesh3 = pyu.getlong(_filesh3, pstruc['DIRANT'])
        self.evaluated = False

        if os.path.isfile(filesh3):
            coeff = io.loadmat(filesh3, appendmat=False)

            #
            # This test is to fix a problem with 2 different
            # behavior of io.loadmat : scalars may come back either as plain
            # floats or wrapped in a 2-D array
            #
            if type(coeff['fmin']) == float:
                fmin = coeff['fmin']
                fmax = coeff['fmax']
            else:
                fmin = coeff['fmin'][0][0]
                fmax = coeff['fmax'][0][0]

            # .. Warning
            # Warning modification takes only one dimension for k
            # if the .sh3 format evolve it may not work anymore
            #
            if type(coeff['Cx.lmax']) == float:
                lmax = coeff['Cx.lmax']
            else:
                lmax = coeff['Cx.lmax'][0][0]

            Cx = SCoeff(typ = 's3',
                        fmin = fmin ,
                        fmax = fmax ,
                        lmax = lmax,
                        data = coeff['Cx.s3'],
                        ind = coeff['Cx.ind'],
                        k = np.squeeze(coeff['Cx.k']))

            Cy = SCoeff(typ= 's3',
                        fmin = fmin ,
                        fmax = fmax ,
                        lmax = lmax,
                        data = coeff['Cy.s3'],
                        ind = coeff['Cy.ind'],
                        k = np.squeeze(coeff['Cy.k']))


            Cz = SCoeff(typ = 's3',
                        fmin = fmin ,
                        fmax = fmax ,
                        data = coeff['Cz.s3'],
                        lmax = lmax,
                        ind = coeff['Cz.ind'],
                        k = np.squeeze(coeff['Cz.k']))

            # create self.S on first load, update it in place afterwards
            if not 'S' in self.__dict__.keys():
                self.S = SSHCoeff(Cx, Cy,Cz)
            else:
                self.S.sets3(Cx,Cy,Cz)

            self.nf = np.shape(Cx.s3)[0]
            self.fGHz = np.linspace(fmin, fmax, self.nf)
        else:
            print(_filesh3, ' does not exist')
def savevsh2(self, filename = ''):
""" save coeff in a .vsh2 antenna file
Parameters
----------
filename : string
"""
# create vsh2 file
if filename == '':
_filevsh2 = self._filename.replace('.trx', '.vsh2')
_filevsh2 = filename
filevsh2 = pyu.getlong(_filevsh2, pstruc['DIRANT'])
if os.path.isfile(filevsh2):
print(filevsh2, ' already exist')
else:
print('create ', filevsh2, ' file')
coeff = {}
coeff['fmin'] = self.fGHz[0]
coeff['fmax'] = self.fGHz[-1]
coeff['Br.ind'] = self.C.Br.ind2
coeff['Bi.ind'] = self.C.Bi.ind2
coeff['Cr.ind'] = self.C.Cr.ind2
coeff['Ci.ind'] = self.C.Ci.ind2
coeff['Br.s2'] = self.C.Br.s2
coeff['Bi.s2'] = self.C.Bi.s2
coeff['Cr.s2'] = self.C.Cr.s2
coeff['Ci.s2'] = self.C.Ci.s2
io.savemat(filevsh2, coeff, appendmat=False)
    def loadsh2(self):
        """ load spherical harmonics coefficient in shape 2

        Rebuilds self.S (SSHCoeff) from a .sh2 file and reconstructs the
        frequency axis self.fGHz from the stored fmin/fmax bounds.
        """
        _filesh2 = self._filename.split('.')[0]+'.sh2'
        filesh2 = pyu.getlong(_filesh2, pstruc['DIRANT'])

        if os.path.isfile(filesh2):
            coeff = io.loadmat(filesh2, appendmat=False)

            #
            # This test is to fix a problem with 2 different
            # behavior of io.loadmat : scalars may come back either as plain
            # floats or wrapped in a 2-D array
            #
            if type(coeff['fmin']) == float:
                fmin = coeff['fmin']
                fmax = coeff['fmax']
            else:
                fmin = coeff['fmin'][0][0]
                fmax = coeff['fmax'][0][0]

            if type(coeff['Cx.lmax']) == float:
                lmax = coeff['Cx.lmax']
            else:
                lmax = coeff['Cx.lmax'][0][0]


            Cx = SCoeff(typ='s2',
                        fmin=fmin,
                        fmax=fmax,
                        lmax = lmax,
                        data=coeff['Cx.s2'],
                        ind=coeff['Cx.ind'])

            Cy = SCoeff(typ='s2',
                        fmin=fmin,
                        fmax=fmax,
                        lmax = lmax,
                        data=coeff['Cy.s2'],
                        ind=coeff['Cy.ind'])

            Cz = SCoeff(typ='s2',
                        fmin=fmin,
                        fmax=fmax,
                        lmax = lmax,
                        data=coeff['Cz.s2'],
                        ind=coeff['Cz.ind'])

            self.S = SSHCoeff(Cx, Cy,Cz)
            Nf = np.shape(Cx.s2)[0]
            self.fGHz = np.linspace(fmin, fmax, Nf)
        else:
            print( _filesh2, ' does not exist')
    def loadvsh2(self):
        """ load antenna from .vsh2 file format

        Load antenna's vsh2 file which only contains
        the vsh coefficients in shape 2. Rebuilds self.C (VSHCoeff) and the
        frequency axis self.fGHz from the stored fmin/fmax bounds.
        """
        _filevsh2 = self._filename
        filevsh2 = pyu.getlong(_filevsh2, pstruc['DIRANT'])

        if os.path.isfile(filevsh2):
            coeff = io.loadmat(filevsh2, appendmat=False)

            #
            # This test is to fix a problem with 2 different
            # behavior of io.loadmat : scalars may come back either as plain
            # floats or wrapped in a 2-D array
            #
            if type(coeff['fmin']) == float:
                fmin = coeff['fmin']
                fmax = coeff['fmax']
            else:
                fmin = coeff['fmin'][0][0]
                fmax = coeff['fmax'][0][0]

            Br = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                         data=coeff['Br.s2'], ind=coeff['Br.ind'])
            Bi = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                         data=coeff['Bi.s2'], ind=coeff['Bi.ind'])
            Cr = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                         data=coeff['Cr.s2'], ind=coeff['Cr.ind'])
            Ci = VCoeff(typ='s2', fmin=fmin, fmax=fmax,
                         data=coeff['Ci.s2'], ind=coeff['Ci.ind'])

            self.C = VSHCoeff(Br, Bi, Cr, Ci)
            Nf = np.shape(Br.s2)[0]
            self.fGHz = np.linspace(fmin, fmax, Nf)
        else:
            print( _filevsh2, ' does not exist')
    def loadvsh3_old(self):
        """ Load antenna vsh coefficients in shape 3

        Legacy loader kept for old .vsh3 files: the frequency range is
        hard-coded to 2-8 GHz over 121 points instead of being read from
        the file.
        """
        _filevsh3 = self._filename
        # NOTE(review): uses bare `getlong` while every other loader uses
        # `pyu.getlong` -- verify `getlong` is imported at module level
        filevsh3 = getlong(_filevsh3, pstruc['DIRANT'])
        # hard-coded legacy frequency bounds (GHz)
        fmin = 2.
        fmax = 8.
        if os.path.isfile(filevsh3):
            coeff = io.loadmat(filevsh3, appendmat=False)
            Br = VCoeff('s3', fmin, fmax, coeff['Br.s3'],
                         coeff['Br.ind'], coeff['Br.k'])
            Bi = VCoeff('s3', fmin, fmax, coeff['Bi.s3'],
                         coeff['Bi.ind'], coeff['Bi.k'])
            Cr = VCoeff('s3', fmin, fmax, coeff['Cr.s3'],
                         coeff['Cr.ind'], coeff['Cr.k'])
            Ci = VCoeff('s3', fmin, fmax, coeff['Ci.s3'],
                         coeff['Ci.ind'], coeff['Ci.k'])
            self.C = VSHCoeff(Br, Bi, Cr, Ci)
            self.fGHz = np.linspace(fmin, fmax, 121)
        else:
            print(_filevsh3, ' does not exist')
def pol2cart(self, ith):
""" converts FTheta, FPhi to Fx,Fy,Fz for theta=ith
Parameters
----------
ith : theta index
Returns
-------
Fx
Fy
Fz
See Also
--------
cart2pol
"""
Fth = self.Ft[:, ith, :]
Fph = self.Fp[:, ith, :]
th = self.theta[ith]
ph = self.phi
Fx = Fth * np.cos(th) * np.cos(ph) - Fph * np.sin(ph)
Fy = Fth * np.cos(th) * np.sin(ph) + Fph * np.cos(ph)
Fz = (-1) * Fth * np.sin(th)
return(Fx, Fy, Fz)
def cart2pol(self, Fx, Fy, Fz, ith):
""" converts Fx,Fy,Fz to Ftheta, Fphi for theta=ith
Parameters
----------
Fx : np.array
Fy : np.array
Fz : np.array
ith : theta index
See Also
--------
pol2cart
"""
th = self.theta[ith]
ph = self.phi
Fth = Fx * np.cos(th) * np.cos(ph) + Fy * np.cos(th) * np.sin(ph) - Fz * np.sin(th)
Fph = -Fx * np.sin(ph) + Fy * np.cos(th)
SqG = np.sqrt(np.real(Fph * np.conj(Fph) + Fth * np.conj(Fth)))
self.sqG[:, ith, :] = SqG
self.Ft[:, ith, :] = Fth
self.Fp[:, ith, :] = Fph
def forcesympol(A):
    """ plot VSH transform vsh basis in 3D plot

    NOTE(review): the body does not match the signature: it uses n, m,
    theta, phi and sf, none of which is derived from the argument `A`.
    The docstring and body describe a `plotVW(n, m, theta, phi, sf=False)`
    helper (see the doctest below) ; calling `forcesympol` as written
    raises NameError. To be fixed upstream.

    Parameters
    ----------
    n,m   : integer values (m<=n)
    theta : ndarray
    phi   : ndarray
    sf    : boolean
        if sf : plotted figures are saved in a *.png file
        else  : plotted figures aren't saved

    Examples
    --------
    .. plot::
        :include-source:

        >>> from pylayers.antprop.antenna import *
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> n=5
        >>> m=3
        >>> theta = np.linspace(0,np.pi,30)
        >>> phi = np.linspace(0,2*np.pi,60)
        >>> plotVW(n,m,theta,phi)

    """
    # calculate v and w
    if m <= n:
        # avoid the singular point theta = pi/2 in the W computation
        theta[np.where(theta == np.pi / 2)[0]] = np.pi / 2 + \
            1e-10  # .. todo :: not clean
        x = -np.cos(theta)

        Pmm1n, Pmp1n = AFLegendre(n, m, x)

        t1 = np.sqrt((n + m) * (n - m + 1))
        t2 = np.sqrt((n - m) * (n + m + 1))
        y1 = t1 * Pmm1n[:, m, n] - t2 * Pmp1n[:, m, n]
        y2 = t1 * Pmm1n[:, m, n] + t2 * Pmp1n[:, m, n]

        Ephi = np.exp(1j * m * phi)
        cphi = np.cos(m * phi)
        # avoid an all-zero sphi factor for m == 0
        if m == 0:
            sphi = 1e-10
        else:
            sphi = np.sin(m * phi)

        ny = len(y1)
        ne = len(Ephi)
        vy = np.ones(ny)
        ve = np.ones(ne)
        Y1 = np.outer(y1, ve)
        Y2 = np.outer(y2, ve)
        EPh = np.outer(vy, Ephi)

        const = (-1.0) ** n / (2 * np.sqrt(n * (n + 1)))
        V = const * Y1 * EPh
        #V[np.isinf(V)|isnan(V)]=0
        Vcos = cphi * V
        Vsin = sphi * V

        if m == 0:
            #W=np.zeros((len(theta),len(phi)))
            W = np.ones((len(theta), len(phi))) * 1e-10
        else:
            Waux = Y2 * EPh
            x1 = 1.0 / x
            W = np.outer(x1, const) * Waux
        Wcos = cphi * W
        Wsin = sphi * W

        # plot V and W
        Ntheta = np.size(theta)
        vt = np.ones(Ntheta)
        Nphi = np.size(phi)
        vp = np.ones(Nphi)
        Phi = np.outer(vt, phi)
        Theta = np.outer(theta, vp)

        #figdirV='/home/rburghel/Bureau/bases_decomposition_VW/base_V_Vsin_Vcos/'
        figdirV = './'
        ext1 = '.pdf'
        ext2 = '.eps'
        ext3 = '.png'

        # |V| on the unit-direction sphere
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(V) * np.cos(Phi) * np.sin(Theta)
        Y = abs(V) * np.sin(Phi) * np.sin(Theta)
        Z = abs(V) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirV + 'V' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')

        # |V cos(m phi)|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Vcos) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Vcos) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Vcos) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirV + 'Vcos' + str(n) + str(m) + '.jpg'
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')

        # |V sin(m phi)|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Vsin) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Vsin) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Vsin) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirV + 'Vsin' + str(n) + str(m) + '.jpg'
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')

        #figdirW='/home/rburghel/Bureau/bases_decomposition_VW/base_W_Wsin_Wcos/'
        figdirW = './'

        # |W| on the unit-direction sphere
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(W) * np.cos(Phi) * np.sin(Theta)
        Y = abs(W) * np.sin(Phi) * np.sin(Theta)
        Z = abs(W) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirW + 'W' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')

        # |W cos(m phi)|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Wcos) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Wcos) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Wcos) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirW + 'Wcos' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')

        # NOTE(review): the figure/X/Y computation below is duplicated --
        # the first fig/ax/X/Y quadruple is immediately discarded
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Wsin) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Wsin) * np.sin(Phi) * np.sin(Theta)

        # |W sin(m phi)|
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        X = abs(Wsin) * np.cos(Phi) * np.sin(Theta)
        Y = abs(Wsin) * np.sin(Phi) * np.sin(Theta)
        Z = abs(Wsin) * np.cos(Theta)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
        ax.set_xlim3d([-1, 1])
        ax.set_ylim3d([-1, 1])
        ax.set_zlim3d([-1, 1])
        if sf:
            sz = fig.get_size_inches()
            fig.set_size_inches(sz * 1.8)
            figname = figdirW + 'Wsin' + str(n) + str(m)
            fig.savefig(figname + ext1, orientation='portrait')
            fig.savefig(figname + ext2, orientation='portrait')
            fig.savefig(figname + ext3, orientation='portrait')

        plt.show()

    else:
        print("Error: m>n!!!")
def compdiag(k, A, th, ph, Fthr, Fphr, typ='modulus', lang='english', fontsize=18):
    """ makes comparison between original pattern and reconstructed pattern

    Draws a 2x2 figure: theta component (original / reconstructed) on the
    left column, phi component on the right column, with shared color
    limits per quantity so both patterns are directly comparable.

    Parameters
    ----------
    k : frequency index
    A : Antenna
    ph : phi base (1 x Np)
    th : theta base (1 x Nt)
    Fthr : Fth output of Fsynth Nf x (Ntheta*Tphi)
    Fphr : Fth output of Fsynth Nf x (Ntheta*Tphi)
    typ : 'modulus' | 'real' | 'imag' | 'phase'
        displayed quantity
    lang = 'french'
         = 'english'
    fontsize : int
    """

    Nf = np.shape(Fthr)[0]

    #Fthr = Fthr.reshape(Nf,len(th),len(ph))
    #Fphr = Fphr.reshape(Nf,len(th),len(ph))

    plt.figure()
    # NOTE(review): rc / title / pcolor are used unqualified in places
    # below, presumably from a pylab star import at module level -- verify
    rc('text', usetex=True)
    Ftho = A.Ftheta
    Fpho = A.Fphi

    # shared color limits, modulus of Fthr, Ftho, Fphr, Fpho
    maxTr = abs(Fthr[:, :, k]).max()
    maxTo = abs(Ftho[:, :, k ]).max()
    MmT = max(maxTr, maxTo)

    minTr = abs(Fthr[ :, :, k ]).min()
    minTo = abs(Ftho[ :, :, k ]).min()
    mmT = min(minTr, minTo)

    maxPr = abs(Fphr[ :, :, k ]).max()
    maxPo = abs(Fpho[ :, :, k ]).max()
    MmP = max(maxPr, maxPo)

    minPr = abs(Fphr[ :, :, k ]).min()
    minPo = abs(Fpho[ :, :, k ]).min()
    mmP = min(minPr, minPo)

    # shared color limits, real part of Fthr, Ftho, Fphr, Fpho
    maxTrr = np.real(Fthr[ :, :, k ]).max()
    maxTor = np.real(Ftho[ :, :, k ]).max()
    MrT = max(maxTrr, maxTor)

    minTrr = np.real(Fthr[ :, :, k ]).min()
    minTor = np.real(Ftho[ :, :, k ]).min()
    mrT = min(minTrr, minTor)

    maxPrr = np.real(Fphr[ :, :, k ]).max()
    maxPor = np.real(Fpho[ :, :, k ]).max()
    MrP = max(maxPrr, maxPor)

    minPrr = np.real(Fphr[ :, :, k ]).min()
    minPor = np.real(Fpho[ :, :, k ]).min()
    mrP = min(minPrr, minPor)

    # shared color limits, imaginary part of Fthr, Ftho, Fphr, Fpho
    maxTri = np.imag(Fthr[ :, :, k ]).max()
    maxToi = np.imag(Ftho[ :, :, k ]).max()
    MiT = max(maxTri, maxToi)

    minTri = np.imag(Fthr[ :, :, k ]).min()
    minToi = np.imag(Ftho[ :, :, k ]).min()
    miT = min(minTri, minToi)

    maxPri = np.imag(Fphr[ :, :, k ]).max()
    maxPoi = np.imag(Fpho[ :, :, k ]).max()
    MiP = max(maxPri, maxPoi)

    minPri = np.imag(Fphr[ :, :, k ]).min()
    minPoi = np.imag(Fpho[ :, :, k ]).min()
    miP = min(minPri, minPoi)

    # shared color limits, phase of Fth, Fph
    maxATr = np.angle(Fthr[ :, :, k ]).max()
    maxATo = np.angle(Ftho[ :, :, k ]).max()
    maT = max(maxATr, maxATo)
    minATr = np.angle(Fthr[ :, :, k ]).min()
    minATo = np.angle(Ftho[ :, :, k ]).min()
    maT0 = min(minATr, minATo)

    maxAPr = np.angle(Fphr[ :, :, k ]).max()
    maxAPo = np.angle(Fpho[ :, :, k ]).max()
    maP = max(maxAPr, maxAPo)
    minAPr = np.angle(Fphr[ :, :, k ]).min()
    minAPo = np.angle(Fpho[ :, :, k ]).min()
    maP0 = min(minAPr, minAPo)

    ax = plt.axes([0, 0, 360, 180])
    # radian to degree conversion factor
    rtd = 180 / np.pi

    # panel 1: original theta component
    plt.subplot(221)
    if typ == 'modulus':
    #
    #cmap=cm.jet
        #pcolor(A.phi*rtd,A.theta*rtd,abs(Ftho[k,:,:]),vmin=0,vmax=mmT)
            #
    #cmap= gray
    #pcolor(A.phi*rtd,A.theta*rtd,abs(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
            #
    #cmap=cm.hot
        plt.pcolor(A.phi * rtd, A.theta * rtd, abs(Ftho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=mmT, vmax=MmT)
        plt.title(r'$|F_{\theta}|$ original', fontsize=fontsize)

    if typ == 'real':
        #pcolor(A.phi*rtd,A.theta*rtd,real(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.real(Ftho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=mrT, vmax=MrT)
        title(r'Re ($F_{\theta}$) original', fontsize=fontsize)
    if typ == 'imag':
        #pcolor(A.phi*rtd,A.theta*rtd,imag(Ftho[k,:,:]),cmap=cm.gray_r,vmin=0,vmax=mmT)
        pcolor(A.phi * rtd, A.theta * rtd, np.imag(Ftho[ :, :, k ]),
               cmap=cm.hot_r, vmin=miT, vmax=MiT)
        title(r'Im ($F_{\theta}$) original', fontsize=fontsize)
    if typ == 'phase':
        #pcolor(A.phi*rtd,A.theta*rtd,angle(Ftho[k,:,:]),cmap=cm.gray_r,vmin=maT0,vmax=maT)
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Ftho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=maT0, vmax=maT)
        if lang == 'french':
            plt.title(r'Arg ($F_{\theta}$) original', fontsize=fontsize)
        else:
            plt.title(r'Ang ($F_{\theta}$) original', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.ylabel(r'$\theta$ (deg)', fontsize=fontsize)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)

    # panel 2: original phi component
    plt.subplot(222)
    if typ == 'modulus':
        plt.pcolor(A.phi * rtd, A.theta * rtd, abs(Fpho[:, :, k ]),
                   cmap=cm.hot_r, vmin=mmP, vmax=MmP)
        plt.title('$|F_{\phi}|$ original', fontsize=fontsize)
    if typ == 'real':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.real(Fpho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=mrP, vmax=MrP)
        plt.title('Re ($F_{\phi}$) original', fontsize=fontsize)
    if typ == 'imag':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.imag(Fpho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=miP, vmax=MiP)
        plt.title('Im ($F_{\phi}$) original', fontsize=fontsize)
    if typ == 'phase':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fpho[ :, :, k ]),
                   cmap=cm.hot_r, vmin=maP0, vmax=maP)
        if lang == 'french':
            plt.title('Arg ($F_{\phi}$) original', fontsize=fontsize)
        else:
            plt.title('Ang ($F_{\phi}$) original', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)

    # panel 3: reconstructed theta component
    plt.subplot(223)
    if typ == 'modulus':
        plt.pcolor(ph * rtd, th * rtd, abs(Fthr[:, :, k ]),
                   cmap=cm.hot_r, vmin=mmT, vmax=MmT)
        if lang == 'french':
            plt.title(r'$|F_{\theta}|$ reconstruit', fontsize=fontsize)
        else:
            plt.title(r'$|F_{\theta}|$ reconstructed', fontsize=fontsize)
    if typ == 'real':
        plt.pcolor(ph * rtd, th * rtd, np.real(Fthr[:,:,k ]),
                   cmap=cm.hot_r, vmin=mrT, vmax=MrT)
        if lang == 'french':
            title(r'Re ($F_{\theta}$) reconstruit', fontsize=fontsize)
        else:
            title(r'Re ($F_{\theta}$) reconstructed', fontsize=fontsize)
    if typ == 'imag':
        plt.pcolor(ph * rtd, th * rtd, np.imag(Fthr[ :, :, k ]),
                   cmap=cm.hot_r, vmin=miT, vmax=MiT)
        if lang == 'french':
            plt.title(r'Im ($F_{\theta}$) reconstruit', fontsize=fontsize)
        else:
            plt.title(r'Im ($F_{\theta}$) reconstructed', fontsize=fontsize)
    if typ == 'phase':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fthr[:,:,k]),
                   cmap=cm.hot_r, vmin=maT0, vmax=maT)
        if lang == 'french':
            plt.title(r'Arg ($F_{\theta}$) reconstruit', fontsize=fontsize)
        else:
            plt.title(r'Ang ($F_{\theta}$) reconstructed', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.xlabel(r'$\phi$ (deg)', fontsize=fontsize)
    plt.ylabel(r'$\theta$ (deg)', fontsize=fontsize)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)

    # panel 4: reconstructed phi component
    plt.subplot(224)
    if typ == 'modulus':
        plt.pcolor(ph * rtd, th * rtd, abs(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=mmP, vmax=MmP)
        if lang == 'french':
            plt.title('$|F_{\phi}|$ reconstruit', fontsize=fontsize)
        else:
            plt.title('$|F_{\phi}|$ reconstructed', fontsize=fontsize)
    if typ == 'real':
        plt.pcolor(ph * rtd, th * rtd, np.real(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=mrP, vmax=MrP)
        if lang == 'french':
            plt.title('Re ($F_{\phi}$) reconstruit', fontsize=fontsize)
        else:
            plt.title('Re ($F_{\phi}$) reconstructed', fontsize=fontsize)
    if typ == 'imag':
        plt.pcolor(ph * rtd, th * rtd, np.imag(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=miP, vmax=MiP)
        if lang == 'french':
            plt.title('Im ($F_{\phi}$) reconstruit', fontsize=fontsize)
        else:
            plt.title('Im ($F_{\phi}$) reconstructed', fontsize=fontsize)
    if typ == 'phase':
        plt.pcolor(A.phi * rtd, A.theta * rtd, np.angle(Fphr[ :, :,k]),
                   cmap=cm.hot_r, vmin=maP0, vmax=maP)
        if lang == 'french':
            plt.title('Arg ($F_{\phi}$) reconstruit', fontsize=fontsize)
        else:
            plt.title('Ang ($F_{\phi}$) reconstructed', fontsize=fontsize)
    plt.axis([0, 360, 0, 180])
    plt.xlabel(r'$\phi$ (deg)', fontsize=fontsize)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    cbar = plt.colorbar()
    for t in cbar.ax.get_yticklabels():
        t.set_fontsize(fontsize)
def BeamGauss(theta,phi,Gmax=19.77,HPBW_az=10,HPBW_el=40,Tilt=10):
    """ Beam with a Gaussian shape

    Separable Gaussian beam in azimuth and elevation, centered on the
    tilted boresight direction theta = Tilt + 90 deg, phi = 0 deg.

    Parameters
    ----------
    theta : float
        angle in degree
    phi : float
        angle in degree
    Gmax : float
        maximum gain (dB)
    HPBW_az : float
        Half Power Beamwidth azimuth degree
    HPBW_el : float
        Half Power Beamwidth elevation degree
    Tilt : float
        angle in degree

    Returns
    -------
    gain : float
        linear gain in the (theta, phi) direction
    """
    deg2rad = np.pi / 180.
    # the 2*sqrt(ln 2) factor makes HPBW the full width at half power
    hp = 2 * np.sqrt(np.log(2))
    az = deg2rad * (theta - (Tilt + 90)) * hp
    el = deg2rad * phi * hp
    att_az = -(az / (HPBW_az * deg2rad)) ** 2
    att_el = -(el / (HPBW_el * deg2rad)) ** 2
    return 10 ** (Gmax / 10.) * np.exp(att_az) * np.exp(att_el)
def show3D(F, theta, phi, k, col=True):
    """ show 3D matplotlib diagram

    Plots |F[k]| on the unit-direction sphere at frequency index k.

    Parameters
    ----------
    F : ndarray (Nf,Nt,Np)
    theta : ndarray (1xNt)
        angle
    phi : ndarray (1xNp)
        angle
    k : int
        frequency index
    col : boolean
        if col -> color coded plot3D
        if col == False -> simple plot3D

    Examples
    --------

    .. plot::
        :include-source:

        >>> import matplotlib.pyplot as plt
        >>> from pylayers.antprop.antenna import *
        >>> A = Antenna('defant.vsh3')
        >>> A.eval(grid=True)

    Warnings
    --------

    len(theta) must be equal with shape(F)[1]
    len(phi) must be equal with shape(F)[2]
    """
    nth = len(theta)
    nph = len(phi)

    # NOTE(review): dimension mismatches are only printed, not raised --
    # the plot below is still attempted
    if k >= np.shape(F)[0]:
        print('Error: frequency index k not in F defined interval')
    if nth != np.shape(F)[1]:
        print('Error: shape mistmatch between theta and F')
    if nph != np.shape(F)[2]:
        print('Error: shape mistmatch between phi and F')

    fig = plt.figure()
    # axes3d and cm are expected at module scope (matplotlib imports)
    ax = axes3d.Axes3D(fig)

    V = F[k, :, :]

    vt = np.ones(nth)
    vp = np.ones(nph)
    Th = np.outer(theta, vp)
    Ph = np.outer(vt, phi)

    # magnitude mapped on the unit-direction sphere
    X = abs(V) * np.cos(Ph) * np.sin(Th)
    Y = abs(V) * np.sin(Ph) * np.sin(Th)
    Z = abs(V) * np.cos(Th)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    if (col):
        ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.hot_r)
    else:
        ax.plot3D(np.ravel(X), np.ravel(Y), np.ravel(Z))
class AntPosRot(Antenna):
    """ Antenna + position + Rotation

    Antenna associated with a position and an orientation.
    Assumes self.p (position, shape (3,)) and self.T (3x3 rotation matrix,
    columns = local frame axes) are set elsewhere -- TODO confirm against
    the constructor (outside this view).
    """
    def field(self,p):
        """ compute the radiated field at observation point(s) p

        Parameters
        ----------
        p : np.array (N,3)
            observation point(s) in global coordinates

        Returns
        -------
        EP : np.array
            field weighted by the spherical propagation term
            exp(-j 2 pi f d / 0.3) / d   (d in meters, f in GHz)
        """
        rad_to_deg = 180/np.pi
        assert p.shape[-1]==3
        # vector(s) from the antenna position to the observation point(s)
        if len(p.shape)==1:
            r = p[None,:]-self.p[None,:]
        else:
            r = p-self.p[None,:]
        dist = np.sqrt(np.sum(r*r,axis=-1))[:,None]
        u = r/dist
        # direction of departure in global spherical coordinates
        th = np.arccos(u[:,2])
        ph = np.arctan2(u[:,1],u[:,0])
        tang = np.vstack((th,ph)).T
        #print("global",tang*rad_to_deg)
        # convert global angles to the antenna local frame
        Rt, tangl = geu.BTB_tx(tang, self.T)
        #print("local",tangl*rad_to_deg)
        # evaluate the pattern in the local directions (no grid)
        self.eval(th=tangl[:,0],ph=tangl[:,1],grid=False)
        E = (self.Ft[:,None,:]*self.T[:,2][None,:,None]+self.Fp[:,None,:]*self.T[:,0][None,:,None])
        # 0.3 = c in m/ns, consistent with fGHz -- wavelength lambda = 0.3/fGHz
        P = np.exp(-1j*2*np.pi*self.fGHz[None,None,:]*dist[...,None]/0.3)/dist[...,None]
        EP = E*P
        return(EP)
    #Rr, rangl = geu.BTB_rx(rang, self.Tr)
def _gain(Ft,Fp):
""" calculates antenna gain
Returns
-------
G : np.array(Nt,Np,Nf) dtype:float
linear gain
or np.array(Nr,Nf)
sqG : np.array(Nt,Np,Nf) dtype:float
linear sqare root of gain
or np.array(Nr,Nf)
efficiency : np.array (,Nf) dtype:float
efficiency
hpster : np.array (,Nf) dtype:float
half power solid angle : 1 ~ 4pi steradian
ehpbw : np.array (,Nf) dtyp:float
equivalent half power beamwidth (radians)
Notes
-----
.. math:: G(\theta,phi) = |F_{\\theta}|^2 + |F_{\\phi}|^2
"""
G = np.real( Fp * np.conj(Fp)
+ Ft * np.conj(Ft) )
return(G)
def _hpbw(G,th,ph):
""" half power beamwidth
Parameters
----------
Gain : Ftheta
Nt x Np
th : np.array
,Nt
ph : np.array
,Np
Returns
-------
ehpbw : effective half power beamwidth
hpster : half power solid angle (steradians)
"""
#
GdB = 10*np.log10(G)
GdBmax = np.max(np.max(GdB,axis=0),axis=0)
dt = th[1]-th[0]
dp = ph[1]-ph[0]
Nt = len(th)
Np = len(ph)
Nf = GdB.shape[2]
hpster = np.zeros(Nf)
ehpbw = np.zeros(Nf)
for k in range(Nf):
U = np.zeros((Nt,Np))
A = GdB[:,:,k]*np.ones(Nt)[:,None]*np.ones(Np)[None,:]
u = np.where(A>(GdBmax[k]-3))
U[u] = 1
V = U*np.sin(th)[:,None]
hpster[k] = np.sum(V)*dt*dp/(4*np.pi)
ehpbw[k] = np.arccos(1-2*hpster[k])
return ehpbw,hpster
def _efficiency(G,th,ph):
""" determine antenna efficiency
Parameters
----------
Gain : Ftheta
Nt x Np
th : np.array
,Nt
ph : np.array
,Np
Returns
-------
oefficiency :
"""
#
dt = th[1]-th[0]
dp = ph[1]-ph[0]
Nt = len(th)
Np = len(ph)
Gs = G*np.sin(th)[:,None,None]*np.ones(Np)[None,:,None]
efficiency = np.sum(np.sum(Gs,axis=0),axis=0)*dt*dp/(4*np.pi)
return efficiency
def _dirmax(G,th,ph,Ft=None,Fp=None):
    """ determine information in the Gmax direction

    Finds the direction of maximum gain and the polarisation state in
    that direction.

    Parameters
    ----------
    G : np.array (Nt x Np x Nf)
        linear gain
    th : np.array
        ,Nt
    ph : np.array
        ,Np
    Ft : np.array (Nt x Np x Nf), optional
        theta component of the field (required for the polarisation state)
    Fp : np.array (Nt x Np x Nf), optional
        phi component of the field (required for the polarisation state)

    Returns
    -------
    GdBmax : np.array (,Nf)
        maximum gain (dB)
    theta_max, phi_max : float
        direction of the (first) maximum
    (hl, sl, el) : tuple of np.array
        polarisation triad in the Gmax direction

    Notes
    -----
    bug fix: ``Ft`` and ``Fp`` were free (undefined) names in the original
    body, which always raised NameError ; they are now explicit,
    backward-compatible optional parameters.
    """
    GdB = 10*np.log10(G)
    GdBmax = np.max(np.max(GdB,axis=0),axis=0)
    # indices of the first occurrence of the maximum
    umax = np.array(np.where(GdB==GdBmax))[:,0]
    theta_max = th[umax[0]]
    phi_max = ph[umax[1]]
    # local spherical basis (e_theta, e_phi, e_r) in the Gmax direction
    M = geu.SphericalBasis(np.array([[theta_max,phi_max]]))
    sl = M[:,2].squeeze()
    uth = M[:,0]
    uph = M[:,1]
    if Ft is None or Fp is None:
        raise ValueError("Ft and Fp are required to compute the polarisation state")
    el = Ft[tuple(umax)]*uth + Fp[tuple(umax)]*uph
    eln = el/np.linalg.norm(el)
    el = np.abs(eln.squeeze())
    hl = np.cross(sl,el)
    return GdBmax,theta_max,phi_max,(hl,sl,el)
def F0(nu, sigma):
    """ F0 function for horn antenna pattern

    Parameters
    ----------
    nu : np.array
        (....,nf)
    sigma : np.array
        (,nf)

    Notes
    -----
    Implements eq. 18.3.2 of
    http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
    """
    # normalized argument and the associated quadratic phase factor
    nu_over_sigma = nu / sigma
    phase = np.exp(1j * (np.pi / 2) * nu_over_sigma ** 2)
    # Fresnel integrals at the two shifted arguments
    s_hi, c_hi = fresnel(nu_over_sigma + sigma)
    s_lo, c_lo = fresnel(nu_over_sigma - sigma)
    diff = (c_hi - 1j * s_hi) - (c_lo - 1j * s_lo)
    return (1. / sigma) * phase * diff
def F1(nu, sigma):
    """ F1 function for horn antenna pattern

    Average of F0 evaluated at nu +/- 0.5; implements eq. 18.3.3 of
    http://www.ece.rutgers.edu/~orfanidi/ewa/ch18.pdf
    """
    return 0.5 * (F0(nu + 0.5, sigma) + F0(nu - 0.5, sigma))
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    doctest.testmod()
| [
2,
12,
9,
12,
19617,
25,
18274,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
492,
1459,
21412,
3712,
279,
2645,
6962,
13,
415,
22930,
13,
415,
13713,
198,
198,
1212,
8265,
17105,
43813,
198,
2025,
20509,
460,
307,
9639,
422,
2972... | 1.688575 | 97,590 |
import numpy as np
np.set_printoptions(linewidth=200)
# 2D integrals with simpson rule
# http://mathfaculty.fullerton.edu/mathews/n2003/SimpsonsRule2DMod.html
import quadratureCoefs as qc
rfunc_default = slater

if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Reference grids: dense x samples and a uniform y grid on [0, ymax].
    ymax = 5.0
    xs = np.arange(-10.,10.,0.1)
    ys = np.arange( 0., ymax, 0.1 )
    #ys = np.linspace( 0., ymax, 50 )
    func1 = slater
    func2 = slater
    dx = xs[1]-xs[0]
    dy = ys[1]-ys[0]

    # Dense-grid reference: brute-force vs FFT-based integration.
    f1,f2,Ws = getFuncs( xs, ys, f1=func1, f2=func2 )
    I_brute = intRfNumpy_brute( f1, f2, Ws*(dx*dy) ); plt.plot(xs,I_brute,label='brute')
    I_fft   = intRfNumpy_fft ( f1, f2, Ws*(dx*dy) ); plt.plot(xs,I_fft ,label='fft')

    # Same FFT integration on a coarser x grid.
    xs_ = np.arange(-10.,10.,0.5)
    dx_ = xs_[1]-xs_[0]
    f1,f2,Ws = getFuncs( xs_, ys, f1=func1, f2=func2 )
    I_low = intRfNumpy_fft( f1, f2, Ws*(dx_*dy) ); plt.plot(xs_,I_low,':',label='fft_low')

    # Gauss-Legendre quadrature along y (nodes/weights scaled to [0, ymax]).
    order = 6
    ys = np.array(qc.GaussLegendreNodes [order])*ymax
    ws = np.array(qc.GaussLegendreWeights[order])
    f1,f2,Ws = getFuncs( xs, ys )
    Ws*=ws[:,None]
    I_CbG = intRfNumpy_fft( f1, f2, Ws*dx*ymax ); plt.plot(xs,I_CbG,':',label='fftCheby')

    # Quadrature variant on the coarse x grid.
    # BUGFIX: previously this reused the dense-grid step dx, skipped the
    # Gauss-Legendre weighting of Ws, and re-plotted I_CbG instead of the
    # freshly computed I_CbG_low (against the wrong abscissa xs).
    f1,f2,Ws = getFuncs( xs_, ys, f1=func1, f2=func2 )
    Ws*=ws[:,None]
    I_CbG_low = intRfNumpy_fft( f1, f2, Ws*dx_*ymax ); plt.plot(xs_,I_CbG_low,':',label='fftCheby_low')

    ratio = I_CbG/I_brute
    #plt.plot(xs,(ratio-1.0)*100.0,label='error_ratio')
    #print ratio
    plt.legend()
    plt.grid()
    plt.show()
| [
198,
11748,
299,
32152,
355,
45941,
198,
198,
37659,
13,
2617,
62,
4798,
25811,
7,
2815,
413,
5649,
28,
2167,
8,
198,
198,
2,
362,
35,
4132,
30691,
351,
985,
8430,
3896,
198,
2,
220,
2638,
1378,
11018,
38942,
10672,
13,
12853,
29111... | 1.780093 | 864 |
###############################
#
# Created by Patrik Valkovic
# 3/12/2021
#
###############################
import unittest
import torch as t
import ffeat
from ffeat.strategies import crossover
from ffeat.utils import decay
from test.repeat import repeat
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| [
14468,
7804,
4242,
21017,
198,
2,
198,
2,
15622,
416,
9606,
74,
569,
971,
17215,
198,
2,
513,
14,
1065,
14,
1238,
2481,
198,
2,
198,
14468,
7804,
4242,
21017,
198,
11748,
555,
715,
395,
198,
11748,
28034,
355,
256,
198,
11748,
277,
... | 3.290323 | 93 |
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
"""
test get single binary package info
"""
from pathlib import Path
from requests import RequestException, Response
from packageship.application.cli.commands.singlepkg import SingleCommand
from packageship.application.common.exc import ElasticSearchQueryException
from test.cli import DATA_BASE_INFO
from test.cli.package_command import PackageTestBase
# Directories holding the canned Elasticsearch responses (mock_data) and the
# expected stdout fixtures (mock_data/expected_data) used by the tests below.
MOCK_DATA_FOLDER = Path(Path(__file__).parent, "mock_data")
EXPECTED_DATA_FOLDER = Path(Path(__file__).parent, "mock_data", "expected_data")
class TestSingleBinaryPackage(PackageTestBase):
    """
    Tests for the single binary package query command.

    Each test stubs the Elasticsearch layer (``mock_es_search``) and/or the
    HTTP layer (``mock_requests_get``), runs ``SingleCommand`` with
    ``command_params`` and compares captured output against ``excepted_str``.
    """
    # Command class exercised by the PackageTestBase harness.
    cmd_class = SingleCommand

    def test_true_params(self):
        """Valid package and database: full package info is printed."""
        self.excepted_str = self.read_file_content(
            "bin_true_params.txt",
            folder=EXPECTED_DATA_FOLDER,
            is_json=False
        )
        self.command_params = ["Judy", "os-version"]
        self.mock_es_search(side_effect=self.read_file_content(
            "pkg_info.json",
            folder=MOCK_DATA_FOLDER))
        self.assert_result()

    def test_wrong_dbs(self):
        """Unknown database name: a request parameter error is reported."""
        self.excepted_str = """
        ERROR_CONTENT :Request parameter error
        HINT :Please check the parameter is valid and query again"""
        self.command_params = ["Judy", "version123"]
        self.mock_es_search(side_effect=self.read_file_content(
            "pkg_info.json",
            folder=MOCK_DATA_FOLDER))
        self.assert_result()

    def test_not_exists_package(self):
        """Package missing from the database: a not-found error is reported."""
        self.excepted_str = """
        ERROR_CONTENT :The querying package does not exist in the databases
        HINT :Use the correct package name and try again"""
        self.command_params = ["Judy", "os-version"]
        # Keep only the first canned ES response and append an empty one,
        # simulating a package that is absent from the index.
        single_package_not_exists_info = self.read_file_content(
            "pkg_info.json", folder=MOCK_DATA_FOLDER)[:1]
        single_package_not_exists_info.append({})
        self.mock_es_search(side_effect=single_package_not_exists_info)
        self.assert_result()

    def test_wrong_type_filelist(self):
        """A malformed filelist entry (filetypes not a list) is reported."""
        def modify_filelist_data():
            """Return canned ES data with a wrong-typed filetypes field."""
            wrong_type_filelist = self.read_file_content("pkg_info.json", folder=MOCK_DATA_FOLDER)
            wrong_type_filelist[1]["hits"]["hits"][0]["_source"]["filelists"][0]["filetypes"] = "h"
            return wrong_type_filelist
        self.excepted_str = self.read_file_content(
            "wrong_type_filelist.txt",
            folder=EXPECTED_DATA_FOLDER,
            is_json=False
        )
        self.command_params = ["Judy", "os-version"]
        self.mock_es_search(side_effect=modify_filelist_data())
        self.assert_result()

    def test_none_filelist(self):
        """A None filelist in the ES response is reported as an error."""
        def generate_none_filelist():
            """Return canned ES data whose filelists field is None."""
            error_filelist_info = self.read_file_content("pkg_info.json", folder=MOCK_DATA_FOLDER)
            error_filelist_info[1]["hits"]["hits"][0]["_source"]["filelists"] = None
            return error_filelist_info
        self.excepted_str = self.read_file_content(
            "error_filelist.txt",
            folder=EXPECTED_DATA_FOLDER,
            is_json=False)
        self.command_params = ["Judy", "os-version"]
        self.mock_es_search(side_effect=generate_none_filelist())
        self.assert_result()

    def test_error_single_bin_package(self):
        """A malformed ES hit for the binary package maps to not-found."""
        self.excepted_str = """
        ERROR_CONTENT :The querying package does not exist in the databases
        HINT :Use the correct package name and try again
        """
        self.command_params = ["Judy", "os-version"]
        error_single_bin_info = self.read_file_content("pkg_info.json", folder=MOCK_DATA_FOLDER)
        error_single_bin_info[1] = {None}
        self.mock_es_search(side_effect=error_single_bin_info)
        self.assert_result()

    def test_empty_provides_for_bin(self):
        """A None provides field still renders, with an empty provides part."""
        def generate_empty_provides_data():
            """Return canned ES data whose provides field is None."""
            empty_provides_single_bin = self.read_file_content("pkg_info.json", folder=MOCK_DATA_FOLDER)
            empty_provides_single_bin[2]["hits"]["hits"][0]["_source"]["provides"] = None
            return empty_provides_single_bin
        self.excepted_str = self.read_file_content(
            "bin_empty_provides.txt",
            folder=EXPECTED_DATA_FOLDER,
            is_json=False)
        self.command_params = ["Judy", "os-version"]
        self.mock_es_search(side_effect=generate_empty_provides_data())
        self.assert_result()

    def test_raise_es_error(self):
        """An Elasticsearch query failure surfaces as a connection error."""
        self.command_params = ["Judy", "os-version"]
        self.mock_es_search(side_effect=[DATA_BASE_INFO, ElasticSearchQueryException])
        self.excepted_str = """
        ERROR_CONTENT :Failed to Connect the database
        HINT :Check the connection
        """
        self.assert_result()

    def test_request_raise_requestexception(self):
        """A RequestException while querying the remote host is reported."""
        self.command_params = ["Judy", "os-version"]
        self.mock_es_search(side_effect=self.read_file_content("pkg_info.json", folder=MOCK_DATA_FOLDER))
        self.excepted_str = """
        ERROR_CONTENT :
        HINT :The remote connection is abnormal, please check the 'remote_host' parameter value to ensure the connectivity of the remote address
        """
        self.mock_requests_get(side_effect=[RequestException])
        self.assert_result()

    def test_request_text_raise_jsonerror(self):
        """An illegal JSON body in the remote response is reported."""
        self.command_params = ["Judy", "os-version"]
        self.excepted_str = """
        ERROR_CONTENT :{"test":'123',}
        HINT :The content is not a legal json format,please check the parameters is valid
        """
        # NOTE(review): Resp appears to be a stub response class provided by
        # shared test helpers (not visible in this module) — confirm origin.
        self.mock_requests_get(return_value=Resp())
        self.assert_result()

    def test_request_status_429(self):
        """HTTP 429 from the remote host yields a throttling message."""
        self.command_params = ["Judy", "os-version"]
        self.excepted_str = """
        Too many requests in a short time, please request again later
        """
        self.mock_requests_get(return_value=Resp())
        self.assert_result()

    def test_request_status_500(self):
        """HTTP 500 from the remote host is reported as a connection problem."""
        self.excepted_str = """
        ERROR_CONTENT :500 Server Error: None for url: None
        HINT :The remote connection is abnormal, please check the 'remote_host' parameter value to ensure the connectivity of the remote address
        """
        self.command_params = ["Judy", "os-version"]
        r = Response()
        r.status_code = 500
        self.mock_requests_get(return_value=r)
        self.assert_result()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
41906,
17174,
46068,
1174,
198,
2,
15069,
357,
66,
8,
43208,
21852,
1766,
1539,
12052,
13,
12131,
12,
42334,
13,
1439,
2489,
10395,
13,
198,
2,
11971,
739,
262,
17996,
272,
6599,
43,... | 2.420919 | 3,155 |
"""
training
Created by: Martin Sicho
On: 19-11-19, 15:23
"""
import torch
from abc import abstractmethod
from torch import Tensor
from drugex.core import model, util
from drugex.api.corpus import Corpus, BasicCorpus
from drugex.api.pretrain.serialization import GeneratorDeserializer, StateProvider, GeneratorSerializer
| [
37811,
198,
34409,
198,
198,
41972,
416,
25,
5780,
311,
38720,
198,
2202,
25,
678,
12,
1157,
12,
1129,
11,
1315,
25,
1954,
198,
37811,
198,
11748,
28034,
198,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
198,
6738,
28034,
1330,
309,
... | 3.421053 | 95 |
#!/usr/bin/env python3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load per-chapter (chapter, chunk count, word count) rows from the
# whitespace-separated counts file.
# BUGFIX: use a context manager so the file handle is closed deterministically
# (the previous bare open() was never closed).
d = []
with open("counts.tsv") as counts_file:
    for line in counts_file:
        chapter, chunks, words = line.strip().split()
        d.append((int(chapter), int(chunks), int(words)))

df = pd.DataFrame(d, columns=["chapter", "chunks", "words"])

sns.set_style("whitegrid")

f, (ax1, ax2) = plt.subplots(2, 1, figsize=(7, 5))

# One bar chart per metric, sharing the chapter axis.
sns.barplot(y="chunks", x="chapter", data=df, ax=ax1)
sns.barplot(y="words", x="chapter", data=df, ax=ax2)

# Hide the upper x label; the lower plot carries it.
ax1.set_xlabel("")

f.suptitle("Chunk and Word Counts by Chapter in the Hobbit", y=0.95, weight='semibold')
f.text(0.01, 0.02, "Digital Tolkien Project • digitaltolkien.com", size='medium', color='black', weight='medium')
f.text(0.99, 0.02, "Little Delving #001", horizontalalignment='right', size='medium', color='black', weight='medium')

f.subplots_adjust(bottom=0.2)

plt.savefig("001.png")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
67,
796,
17635,
198,
1640... | 2.519886 | 352 |
if __name__ == '__main__':
    # Demo driver: sort a sample list, then sort the result a second time to
    # show insertion_sort is stable on already-sorted input.
    sample = [2, 8, 5, 3, 9, 4]
    # sample = []
    first_pass = insertion_sort(sample)
    second_pass = insertion_sort(first_pass)
    print(second_pass)
| [
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5240,
796,
685,
17,
11,
807,
11,
642,
11,
513,
11,
860,
11,
604,
60,
198,
220,
220,
220,
1303,
3258,
796,
17635,
198,
220,
220,
220,
23243,
62,
3258,
... | 2.134146 | 82 |
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import pkgutil
from types import ModuleType
from typing import Optional, List
import numpy as np # type: ignore
# Every numeric NumPy dtype exercised by the test helpers: signed integers,
# unsigned integers and IEEE floats (complex and bool are excluded).
all_numeric_dtypes = [
    np.int8, np.int16, np.int32, np.int64,
    np.uint8, np.uint16, np.uint32, np.uint64,
    np.float16, np.float32, np.float64,
]
def import_recursive(package):  # type: (ModuleType) -> None
    """
    Takes a package and imports all modules underneath it.

    Walks depth-first: every direct submodule is imported, and any
    subpackage is imported and then recursed into. The observable effect is
    that all descendant modules appear in ``sys.modules``.
    """
    # __path__ exists only on packages, not plain modules.
    # (Removed a dead `pkg_dir = None` assignment that was immediately
    # overwritten.)
    pkg_dir = package.__path__  # type: ignore
    module_location = package.__name__
    for (_module_loader, name, ispkg) in pkgutil.iter_modules(pkg_dir):
        module_name = "{}.{}".format(module_location, name)  # Module/package
        module = importlib.import_module(module_name)
        if ispkg:
            import_recursive(module)
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
1... | 2.67867 | 361 |
from django.conf import settings
from django.urls import reverse
from django.utils import timezone
from zeep import Client
from misaghestan.subscriptions.models import SubscriptionTransaction, UserSubscription
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
41271,
538,
1330,
20985,
198,
198,
6738,
2984,
363,
3634,
272,
13,
7266,
1204... | 4 | 53 |
from typing import Optional, Iterable, Union
| [
6738,
19720,
1330,
32233,
11,
40806,
540,
11,
4479,
628,
198
] | 4.272727 | 11 |
from __future__ import print_function
from pyNastran.bdf.mesh_utils.test.test_convert import TestConvert
from pyNastran.bdf.mesh_utils.test.test_cutting_plane import TestCuttingPlane
from pyNastran.bdf.mesh_utils.test.test_mass import TestMass
from pyNastran.bdf.mesh_utils.test.test_mesh_quality import TestMeshQuality
from pyNastran.bdf.mesh_utils.test.test_mesh_utils import TestMeshUtils
from pyNastran.bdf.mesh_utils.test.test_renumber import TestRenumber
from pyNastran.bdf.mesh_utils.test.test_remove_unused import TestRemoveUnused
from pyNastran.bdf.mesh_utils.test.test_sum_loads import TestLoadSum
if __name__ == "__main__": # pragma: no cover
import os
import unittest
on_rtd = os.environ.get('READTHEDOCS', None)
if on_rtd is None:
unittest.main()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
12972,
45,
459,
2596,
13,
65,
7568,
13,
76,
5069,
62,
26791,
13,
9288,
13,
9288,
62,
1102,
1851,
1330,
6208,
3103,
1851,
198,
6738,
12972,
45,
459,
2596,
13,
65,
7568,
... | 2.60396 | 303 |
from ply import lex, yacc
| [
6738,
35960,
1330,
31191,
11,
331,
4134,
628,
628,
628,
198
] | 2.909091 | 11 |
"""Defines the GraphQL schema for custom URLs."""
import graphene
from graphene import Node
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.rest_framework.mutation import SerializerMutation
from graphene_django.types import DjangoObjectType
from cdsso.users.api.serializers import USER_EXCLUDE_FIELDS, UserSerializer
from cdsso.users.models import User
class UserNode(DjangoObjectType):
    """
    User information who are not marked anonymous. The actualCount will have the total number of members,
    and the resulting data will be non-anonymous users.
    """
    @classmethod
    def get_queryset(cls, queryset, info):
        """Overrides the default queryset to filter anyone who wishes to remain anonymous."""
        # Only expose users who have not opted out (anonymous flag False).
        return queryset.filter(anonymous=False)
| [
37811,
7469,
1127,
262,
29681,
9711,
32815,
329,
2183,
32336,
526,
15931,
198,
11748,
42463,
198,
6738,
42463,
1330,
19081,
198,
6738,
42463,
62,
28241,
14208,
13,
24455,
1330,
37770,
22417,
32048,
15878,
198,
6738,
42463,
62,
28241,
14208,... | 3.375 | 240 |
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torchvision.transforms import Compose
from dataset.dense_transform import Normalize, Rotate90, VFlip, Pad, RandomRotate
from dataset.dense_transform import RandomCropAndScale, HFlip, ToTensor, ColorJitterImage, LightingImage
from tools.adamw import AdamW
from tools.clr import CyclicLR
from tools.lr_policy import PolyLR
def get_model_params(network_config):
    """Convert a configuration to actual model parameters

    Parameters
    ----------
    network_config : dict
        Dictionary containing the configuration options for the network.

    Returns
    -------
    model_params : dict
        Dictionary containing the actual parameters to be passed to the
        `net_*` functions in `models`.
    """
    # Copy over just the two recognised options; any other keys in the
    # configuration are deliberately ignored.
    return {
        "seg_classes": network_config["seg_classes"],
        "backbone_arch": network_config["backbone_arch"],
    }
def create_optimizer(optimizer_config, model, master_params=None):
    """Creates optimizer and schedule from configuration

    Parameters
    ----------
    optimizer_config : dict
        Dictionary containing the configuration options for the optimizer
        (keys: type, learning_rate, weight_decay, classifier_lr, schedule,
        and SGD-specific momentum/nesterov).
    model : Model
        The network model.
    master_params : iterable, optional
        Pre-built parameter list (e.g. FP32 master copies for mixed
        precision); used instead of ``model.parameters()`` when provided.

    Returns
    -------
    optimizer : Optimizer
        The optimizer.
    scheduler : LRScheduler
        The learning rate scheduler.

    Raises
    ------
    KeyError
        If the optimizer type or the schedule type is not recognized.
    """
    if optimizer_config["classifier_lr"] != -1:
        # Separate classifier parameters from all others so the classifier
        # head can train with its own learning rate.
        net_params = []
        classifier_params = []
        for k, v in model.named_parameters():
            if not v.requires_grad:
                continue
            if k.find("encoder") != -1:
                net_params.append(v)
            else:
                classifier_params.append(v)
        params = [
            {"params": net_params},
            {"params": classifier_params, "lr": optimizer_config["classifier_lr"]},
        ]
    else:
        if master_params:
            params = master_params
        else:
            params = model.parameters()

    if optimizer_config["type"] == "SGD":
        optimizer = optim.SGD(params,
                              lr=optimizer_config["learning_rate"],
                              momentum=optimizer_config["momentum"],
                              weight_decay=optimizer_config["weight_decay"],
                              nesterov=optimizer_config["nesterov"])
    elif optimizer_config["type"] == "Adam":
        optimizer = optim.Adam(params,
                               lr=optimizer_config["learning_rate"],
                               weight_decay=optimizer_config["weight_decay"])
    elif optimizer_config["type"] == "AdamW":
        optimizer = AdamW(params,
                          lr=optimizer_config["learning_rate"],
                          weight_decay=optimizer_config["weight_decay"])
    elif optimizer_config["type"] == "RmsProp":
        # BUGFIX: this branch previously constructed optim.Adam (copy-paste
        # error); "RmsProp" now builds the RMSprop optimizer it names.
        optimizer = optim.RMSprop(params,
                                  lr=optimizer_config["learning_rate"],
                                  weight_decay=optimizer_config["weight_decay"])
    else:
        raise KeyError("unrecognized optimizer {}".format(optimizer_config["type"]))

    schedule_type = optimizer_config["schedule"]["type"]
    if schedule_type == "step":
        scheduler = lr_scheduler.StepLR(optimizer, **optimizer_config["schedule"]["params"])
    elif schedule_type == "multistep":
        scheduler = lr_scheduler.MultiStepLR(optimizer, **optimizer_config["schedule"]["params"])
    elif schedule_type == "exponential":
        scheduler = lr_scheduler.ExponentialLR(optimizer, **optimizer_config["schedule"]["params"])
    elif schedule_type == "poly":
        scheduler = PolyLR(optimizer, **optimizer_config["schedule"]["params"])
    elif schedule_type == "clr":
        scheduler = CyclicLR(optimizer, **optimizer_config["schedule"]["params"])
    elif schedule_type == "constant":
        scheduler = lr_scheduler.LambdaLR(optimizer, lambda epoch: 1.0)
    elif schedule_type == "linear":
        # NOTE(review): linear_lr is expected to be defined elsewhere in this
        # module — confirm before configuring schedule type "linear".
        scheduler = lr_scheduler.LambdaLR(optimizer, linear_lr)
    else:
        # BUGFIX: an unknown schedule previously fell through and crashed
        # with NameError on the return below; fail explicitly instead.
        raise KeyError("unrecognized schedule {}".format(schedule_type))
    return optimizer, scheduler
def create_transforms(input_config):
    """Create transforms from configuration

    Parameters
    ----------
    input_config : dict
        Dictionary containing the configuration options for input pre-processing.

    Returns
    -------
    train_transforms : list
        List of transforms to be applied to the input during training.
    val_transforms : list
        List of transforms to be applied to the input during validation.
    """
    train_transforms = []
    # Optional random rotation augmentation (angle and probability from config).
    if input_config.get('random_rotate', None):
        train_transforms.append(RandomRotate(input_config['random_rotate']['angle'], input_config['random_rotate']['prob']))
    # Optional random crop with rescaling; prob=1 means it always fires once configured.
    if input_config.get('random_crop', None):
        train_transforms.append(RandomCropAndScale(input_config['random_crop'][0], input_config['random_crop'][1], scale_range=input_config['crop_size_range'], rescale_prob=input_config['rescale_prob'], prob=1))
    # Geometric flips/rotations below are intentionally disabled toggles.
    train_transforms += [
        # HFlip(),
        # Rotate90(),
        # VFlip(),
        ToTensor(),
    ]
    # Optional photometric jitter, applied after tensor conversion.
    if input_config.get("color_jitter_train", False):
        train_transforms.append(ColorJitterImage())
    # Validation path: deterministic padding plus tensor conversion only.
    val_transforms = []
    val_transforms += [
        Pad(),
        ToTensor(),
    ]
    return Compose(train_transforms), Compose(val_transforms)
| [
11748,
28034,
13,
40085,
355,
6436,
198,
11748,
28034,
13,
40085,
13,
14050,
62,
1416,
704,
18173,
355,
300,
81,
62,
1416,
704,
18173,
198,
6738,
28034,
10178,
13,
7645,
23914,
1330,
3082,
577,
198,
198,
6738,
27039,
13,
67,
1072,
62,... | 2.418403 | 2,304 |
"""Unit test package for ensemble."""
| [
37811,
26453,
1332,
5301,
329,
34549,
526,
15931,
198
] | 4.222222 | 9 |
import numpy as np
import math
import autogen as ag
# # basic operator test
# print (CppADScalar(1.0), -CppADScalar(1.0))
# assert CppADScalar(1.0) == CppADScalar(1.0)
# assert -CppADScalar(1.0) == CppADScalar(-1.0)
# # assert CppADScalar(2.0)**2 == CppADScalar(4.0)
# assert CppADScalar(1.0) * CppADScalar(5.0) == CppADScalar(10.0) / CppADScalar(2.0)
# assert CppADScalar(1.0) + CppADScalar(5.0) == CppADScalar(10.0) - CppADScalar(4.0)
#
# # sin test
# arr = np.array([CppADScalar(0), CppADScalar(math.pi / 2)], dtype=CppADScalar)
# sin_arr = np.sin(arr)
# assert sin_arr[0] == CppADScalar(0)
# assert sin_arr[1] == CppADScalar(1.0)
#
# # Array operator with float test
# arr = np.array([CppADScalar(1), CppADScalar(2)], dtype=CppADScalar)
# arr = 3 * arr / 2.
# assert arr[0] == CppADScalar(1.5)
# assert arr[1] == CppADScalar(3.0)
#
# print(arr)
# CG Scalar test
# basic operator test
# (renamed locals: the previous `input`/`output` names shadowed Python builtins)
cg_scalar = ag.ADCGScalarPtr(1.0)
print(cg_scalar)
print(cg_scalar.cos())
print(ag.ADCGScalarPtr(1.0) + ag.ADCGScalarPtr(1.0))

# Record the tape: declare independents, then build the AD function object.
in_vec = ag.ADCGPtrVector([ag.ADCGScalarPtr(1.0)])
out_vec = ag.ADCGPtrVector([ag.ADCGScalarPtr(1.0)])
ag.independent(in_vec)
f = ag.ADCGPtrFun(in_vec, out_vec)

# Generate code for f, then evaluate the value and Jacobian at x = [2.0].
gen = ag.GeneratedCodeGen("test_function", f)
x = [2.0]
y = gen.forward(x)
print("y = ", y)
J = gen.jacobian(x)
print("j = ", J)
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
1960,
6644,
355,
556,
198,
198,
2,
1303,
4096,
10088,
1332,
198,
2,
3601,
357,
34,
381,
2885,
3351,
282,
283,
7,
16,
13,
15,
828,
532,
34,
381,
2885,
3351,
282,
283,
7,... | 2.021978 | 637 |
from napari_plugin_engine import napari_hook_implementation
from .napari_splineit import napari_splineit
@napari_hook_implementation
| [
6738,
25422,
2743,
62,
33803,
62,
18392,
1330,
25422,
2743,
62,
25480,
62,
320,
32851,
198,
6738,
764,
77,
499,
2743,
62,
22018,
500,
270,
1330,
25422,
2743,
62,
22018,
500,
270,
628,
198,
31,
77,
499,
2743,
62,
25480,
62,
320,
3285... | 3.068182 | 44 |
# -*- coding: utf-8 -*-
from pathlib import Path
import os
import os.path
import sys
import getopt
import json
import shutil
import re
import sys
import getopt
import gettext
def main(argv):
    """
    WebPerf Core - Regression Test
    Usage:
    verify_result.py -h
    Options and arguments:
    -h/--help\t\t\t: Verify Help command
    -l/--language\t\t: Verify languages
    -c/--prep-config <activate feature, True or False>\t\t: Uses SAMPLE-config.py to creat config.py
    -t/--test <test number>\t: Verify result of specific test
    NOTE:
    If you get this in step "Setup config [...]" you forgot to add repository secret for your repository.
    More info can be found here: https://github.com/Webperf-se/webperf_core/issues/81
    """
    try:
        opts, args = getopt.getopt(argv, "hlc:t:", [
            "help", "test=", "prep-config=", "language"])
    except getopt.GetoptError:
        # Unknown flag or missing argument: show usage and fail.
        print(main.__doc__)
        sys.exit(2)

    # No arguments at all: show usage and fail.
    # (idiom fix: was `if (opts.__len__() == 0):`)
    if not opts:
        print(main.__doc__)
        sys.exit(2)

    # Each recognized option terminates the process via sys.exit, so the
    # unreachable `break` statements that followed them were removed.
    for opt, arg in opts:
        if opt in ('-h', '--help'):  # help
            print(main.__doc__)
            sys.exit(0)
        elif opt in ("-c", "--prep-config"):
            is_activated = False
            if 'true' in arg or 'True' in arg or '1' in arg:
                is_activated = True
            if prepare_config_file('SAMPLE-config.py', 'config.py', is_activated):
                sys.exit(0)
            else:
                sys.exit(2)
        elif opt in ("-l", "--language"):
            if validate_translations():
                sys.exit(0)
            else:
                sys.exit(2)
        elif opt in ("-t", "--test"):  # test id
            if validate_testresult(arg):
                sys.exit(0)
            else:
                sys.exit(2)

    # No match for command so return error code to fail verification
    sys.exit(2)
"""
If file is executed on itself then call a definition, mostly for testing purposes
"""
if __name__ == '__main__':
main(sys.argv[1:])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
198,
11748,
25064,
198,
11748,
651,
8738,
198,
11748,
33918,
198,
11748,
4423,
346,
198,
117... | 2.085603 | 1,028 |
#==============================================================================
# -*- encoding: utf-8 -*-
#==============================================================================
#==============================================================================
# Módulos Importados do Python / Devito / Examples
#==============================================================================
#==============================================================================
# Pyhton Modules and Imports
#==============================================================================
import numpy as np
import matplotlib.pyplot as plot
import math as mt
import sys
import time as tm
import testes_opt as ttopt
import rotinas_plot as rplot
import macustica as mc
import coef_opt as copt
#==============================================================================
#==============================================================================
# Devito Imports
#==============================================================================
from devito import *
#==============================================================================
#==============================================================================
# Devito Examples Imports
#==============================================================================
from examples.seismic import TimeAxis
from examples.seismic import RickerSource
from examples.seismic import Receiver
# Silence Devito's logger so only errors reach the console.
configuration['log-level']='ERROR'
#==============================================================================

#==============================================================================
# Close any figures left over from a previous interactive session.
plot.close("all")
#==============================================================================
#==============================================================================
# Testes de Leitura de Dados
#==============================================================================
# Select the experiment: ptype chooses the physical test case (1-4) and ref
# chooses the refined (ref != 0) or baseline (ref == 0) parameter set.
ptype = 1
ref   = 1

if(ref!=0):
    if(ptype==1): teste = ttopt.teste1_ref1
    if(ptype==2): teste = ttopt.teste2_ref1
    if(ptype==3): teste = ttopt.teste3_ref1
    if(ptype==4): teste = ttopt.teste4_ref1
else:
    if(ptype==1): teste = ttopt.teste1
    if(ptype==2): teste = ttopt.teste2
    if(ptype==3): teste = ttopt.teste3
    if(ptype==4): teste = ttopt.teste4

# Acoustic-model container and optimized FD coefficients for the chosen test.
MV    = mc.acusdevito(teste)
coef1 = copt.coefopt1(teste,MV)
#==============================================================================
#==============================================================================
# Obtenção de Parâmetros
#==============================================================================
nptx    = teste.nptx    # Number of grid points in the x direction
npty    = teste.npty    # Number of grid points in the y direction
x0      = teste.x0      # First mesh point in x
y0      = teste.y0      # First mesh point in y
compx   = teste.compx   # Domain length in x
compy   = teste.compy   # Domain length in y
hxv     = teste.hx      # Grid spacing in x (delta x)
hyv     = teste.hy      # Grid spacing in y (delta y)
t0      = teste.t0      # Simulation start time (milliseconds)
tn      = teste.tn      # Simulation end time (milliseconds)
f0      = teste.f0      # Source peak frequency (kHz)
nfonte  = teste.nfonte  # Number of sources
xposf   = teste.xposf   # Source position(s) in x
yposf   = teste.yposf   # Source position(s) in y
nrec    = teste.nrec    # Number of receivers
nxpos   = teste.nxpos   # Receiver positions in x
nypos   = teste.nypos   # Receiver positions in y
CFL     = teste.CFL     # Stability (CFL) constant
v       = MV.C0a        # Velocity matrix
jump    = teste.jump    # Plotting interval (in time steps)
tou     = teste.tou     # Time Order Displacement
sou     = teste.sou     # Space Order Displacement
nvalue  = teste.nvalue  # Second Parameter for Stencils
npesos  = teste.npesos  # Allow Different Weights
wauthor = teste.wauthor # Weight's Author
wtype   = teste.wtype   # Weight Type
btype   = teste.btype   # Boundary Type
ftype   = teste.ftype   # Source type
#==============================================================================
#==============================================================================
# Definição de Vetores Devito
#==============================================================================
# Devito grid: physical origin/extent and discretization, with the extra
# subdomain d0domain() consumed by the update stencils.
origin  = (x0,y0)
extent  = (compx,compy)
shape   = (nptx,npty)
spacing = (hxv,hyv)
d0_domain = d0domain()
grid = Grid(origin=origin,extent=extent,shape=shape,subdomains=(d0_domain),dtype=np.float64)
#==============================================================================

#==============================================================================
# Time mesh construction
#==============================================================================
# Largest stable time step from the CFL condition, then an integer number of
# steps covering [t0, tn].
vmax  = np.around(np.amax(v),1)
dtmax = (min(hxv,hyv)*CFL)/(vmax)
ntmax = int((tn-t0)/dtmax)
dt0   = (tn-t0)/(ntmax)   # effective step realized by ntmax uniform steps
time_range = TimeAxis(start=t0,stop=tn,num=ntmax+1)
nt = time_range.num - 1
nplot  = mt.ceil(nt/jump) + 1   # number of snapshots that will be plotted
#==============================================================================
#print(dt0,nt,jump,nplot,hxv,hyv)
#sys.exit()
#==============================================================================
# Variváveis Simbólicas
#==============================================================================
(hx,hy) = grid.spacing_map
(x, y) = grid.dimensions
time = grid.time_dim
t = grid.stepping_dim
dt = grid.stepping_dim.spacing
#==============================================================================
#==============================================================================
# Construção e Posicionamento da Fonte
#==============================================================================
src = RickerSource(name='src',grid=grid,f0=f0,npoint=nfonte,time_range=time_range,staggered=NODE,dtype=np.float64)
src.coordinates.data[:, 0] = xposf
src.coordinates.data[:, 1] = yposf
#==============================================================================
#==============================================================================
# Construção e Posicionamento dos Receivers
#==============================================================================
rec = Receiver(name='rec',grid=grid,npoint=nrec,time_range=time_range,staggered=NODE,dtype=np.float64)
rec.coordinates.data[:, 0] = nxpos
rec.coordinates.data[:, 1] = nypos
#==============================================================================
#==============================================================================
# Construção e Posicionamento dos Receivers Seleionados
#==============================================================================
if(ptype==1):
xpositionv = np.array([500.0,1500.0,500.0,1500.0])
ypositionv = np.array([500.0,500.0,1500.0,1500.0])
if(ptype==2):
xpositionv = np.array([4000.0,4000.0,4000.0,6000.0,6000.0,6000.0,8000.0,8000.0,8000.0,])
ypositionv = np.array([2000.0,2500.0,1500.0,3000.0,2000.0,2500.0,1500.0,3000.0,2000.0,2500.0,1500.0,3000.0])
if(ptype==3):
xpositionv = np.array([500.0,1500.0,500.0,1500.0])
ypositionv = np.array([500.0,500.0,1500.0,1500.0])
if(ptype==4):
xpositionv = np.array([30000.0,30000.0,30000.0,40000.0,40000.0,40000.0])
ypositionv = np.array([2500.0,5000.0,7500.0,2500.0,5000.0,7500.0,2500.0,5000.0,7500.0])
nrec_select = len(xpositionv)
rec_select = Receiver(name='rec_select',grid=grid,npoint=nrec_select,time_range=time_range,staggered=NODE,dtype=np.float64)
rec_select.coordinates.data[:, 0] = xpositionv
rec_select.coordinates.data[:, 1] = ypositionv
#==============================================================================
#==============================================================================
# Construção da Equação da Onda com Termo de Fonte
#==============================================================================
u = TimeFunction(name="u",grid=grid,time_order=tou,space_order=sou,staggered=NODE,dtype=np.float64)
vel = Function(name="vel",grid=grid,space_order=2,staggered=NODE,dtype=np.float64)
vel.data[:,:] = v[:,:]
fact = 1/(hxv*hyv)
src_term = src.inject(field=u.forward,expr=fact*1*src*dt**2*vel**2)
rec_term = rec.interpolate(expr=u)
rec_select_term = rec_select.interpolate(expr=u)
if(npesos==0):
pde0 = Eq(u.dt2 - u.laplace*vel*vel)
stencil0 = Eq(u.forward, solve(pde0,u.forward),subdomain = grid.subdomains['d0'])
if(npesos==1):
Txx,Tyy,mcoef = coef1.calccoef(wauthor,wtype,sou,nvalue)
new_laplace, contcoef = coef1.eqconstuct(mcoef,u,t,x,y)
if(wauthor==4 or wauthor==5):
pde0 = new_laplace - u[t-1,x,y]
stencil0 = Eq(u[t+1,x,y],pde0,subdomain=grid.subdomains['d0'])
else:
pde0 = Eq(u.dt2 - new_laplace*vel*vel)
stencil0 = Eq(u.forward, solve(pde0,u.forward),subdomain = grid.subdomains['d0'])
#==============================================================================
#==============================================================================
# Criando Estrutura para Plots Selecionados
#==============================================================================
time_subsampled = ConditionalDimension('t_sub',parent=time,factor=jump)
usave = TimeFunction(name='usave',grid=grid,time_order=tou,space_order=sou,save=nplot,time_dim=time_subsampled,staggered=NODE,dtype=np.float64)
Ug = np.zeros((nplot,nptx,npty))
#==============================================================================
#==============================================================================
# Construção do Operador de Solução
#==============================================================================
if(btype==1):
bc = [Eq(u[t+1,0,y],0.),Eq(u[t+1,nptx-1,y],0.),Eq(u[t+1,x,0],0.),Eq(u[t+1,x,npty-1],0.)]
op = Operator([stencil0] + src_term + bc + rec_term + rec_select_term + [Eq(usave,u.forward)],subs=grid.spacing_map)
if(btype==2):
bc = [Eq(u[t+1,0,y],0.),Eq(u[t+1,nptx-1,y],0.),Eq(u[t+1,x,npty-1],0.)]
bc1 = [Eq(u[t+1,x,-k],u[t+1,x,k]) for k in range(1,int(sou/2)+1)]
op = Operator([stencil0] + src_term + bc + bc1 + rec_term + rec_select_term + [Eq(usave,u.forward)],subs=grid.spacing_map)
usave.data[:] = 0.
u.data[:] = 0.
start = tm.time()
op(time=nt,dt=dt0)
end = tm.time()
time_exec = end - start
Ug[:] = usave.data[:]
Ug[nplot-1,:,:] = u.data[0,:,:]
#==============================================================================
#==============================================================================
# Plots de Interesse
#==============================================================================
#G1 = rplot.graph2d(u.data[0,:,:],teste,ref)
#R1 = rplot.graph2drec(rec.data,teste,ref)
#V1 = rplot.graph2dvel(v,teste)
S1 = rplot.datasave(teste,rec.data,Ug,rec_select.data,ref)
#==============================================================================
#==============================================================================
print("Tempo de Execuação da Referencia = %.3f s" %time_exec)
#============================================================================== | [
2,
23926,
25609,
855,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
23926,
25609,
855,
198,
198,
2,
23926,
25609,
855,
198,
2,
337,
10205,
67,
377,
418,
17267,
22484,
466,
11361,
1220,
6245,
10094,
1220,
2... | 2.947628 | 3,857 |
"""
Description:
-----------
This Script Mask the url behind another url
Usage:
-----
python3 maskurl.py
"""
import sys
import argparse
from urllib.parse import urlparse
from requests import post
banner = r"""
__ __ _ ____ _ __ _ _ ____ _
| \/ | / \ / ___| | |/ / | | | | | _ \ | |
| |\/| | / _ \ \___ \ | ' / | | | | | |_) | | |
| | | | / ___ \ ___) | | . \ | |_| | | _ < | |___
|_| |_| /_/ \_\ |____/ |_|\_\ \___/ |_| \_\ |_____|
"""
def Shortner(big_url: str) -> str:
    """
    Shorten *big_url* via the is.gd API and return the short URL.

    Raises requests.Timeout if is.gd does not answer within 10 seconds,
    and requests.HTTPError for non-2xx responses.
    """
    # A bounded timeout keeps the CLI from hanging forever on a dead network,
    # and raise_for_status() surfaces HTTP failures explicitly instead of a
    # confusing KeyError on the missing 'shorturl' field.
    response = post(f"https://is.gd/create.php?format=json&url={big_url}", timeout=10)
    response.raise_for_status()
    return response.json()['shorturl']
def MaskUrl(target_url: str, mask_domain: str, keyword: str) -> str:
    """
    Build the masked link: the mask domain and keyword form the userinfo
    part, followed by the shortened form of the real target URL.
    """
    shortened = Shortner(target_url)
    parsed = urlparse(shortened)
    return f"{mask_domain}-{keyword}@{parsed.netloc + parsed.path}"
if __name__ == "__main__":
    # Build the CLI; the flags are only consulted when arguments were given.
    parser = argparse.ArgumentParser(description="Mask the URL behind the another URL")
    cli_flags = (
        ("--target", "Target URL to Mask (With http or https)"),
        ("--mask", "Mask URL (With http or https)"),
        ("--keywords", "Keywords (Use (-) instead of whitespace)"),
    )
    for flag, help_text in cli_flags:
        parser.add_argument(flag, type=str, help=help_text, required=True)

    print(f"\033[91m {banner}\033[00m")

    if len(sys.argv) == 1:
        # No CLI arguments: fall back to interactive prompts.
        print("\n")
        target = input("Enter the url (With http or https): ")
        mask = input("Enter the domain name to mask url (With http or https): ")
        keyword = input("Enter the keywords (use '-' instead of whitespace): ")
        print("\n")
    else:
        args = parser.parse_args()
        target, mask, keyword = args.target, args.mask, args.keywords

    print(f"\033[91m {MaskUrl(target, mask, keyword)}\033[00m")
| [
37811,
198,
11828,
25,
198,
32284,
198,
220,
220,
220,
770,
12327,
18007,
262,
19016,
2157,
1194,
19016,
198,
198,
28350,
25,
198,
30934,
198,
220,
220,
220,
21015,
18,
9335,
6371,
13,
9078,
220,
198,
220,
220,
220,
220,
198,
37811,
... | 2.174948 | 966 |
from src.markdown_converter import MarkdownConverter
from re import compile
@MarkdownConverter.register
| [
6738,
12351,
13,
4102,
2902,
62,
1102,
332,
353,
1330,
2940,
2902,
3103,
332,
353,
198,
6738,
302,
1330,
17632,
628,
198,
31,
9704,
2902,
3103,
332,
353,
13,
30238,
198
] | 3.419355 | 31 |
from jinja2 import Template
from IPython.display import IFrame, HTML
import os
import json
from .base_plotter import IPlotter
class GCPlotter(IPlotter):
    '''
    Class for creating Google Charts in ipython notebook.

    Renders chart data into an HTML/JS snippet via jinja2 and either
    displays it inline (``plot``), saves it to disk (``save``) or does
    both (``plot_and_save``).
    '''

    head = '''
    <!-- Load Google Charts -->
    <script type='text/javascript' src='https://www.gstatic.com/charts/loader.js'></script>
'''

    template = '''
    <div id={{div_id}} style='width: 100%; height: 100%' ></div>
    <script type='text/javascript'>
        google.charts.load('current', {'packages':['{{ chart_package}}']});
        google.charts.setOnLoadCallback(drawChart);
        function drawChart() {
          var data = google.visualization.arrayToDataTable({{data}}
          );
          var chart = new google.visualization.{{chart_type}}(document.getElementById('{{div_id}}'));
          chart.draw(data, {{options}});
        }
    </script>
'''

    @staticmethod
    def _html_path(filename):
        '''
        Return the sanitized output path for *filename*: spaces are replaced
        with underscores and the '.html' suffix is appended.

        Centralizing this mapping guarantees that the file written by save()
        and the file referenced by plot_and_save()'s IFrame are the same
        (previously plot_and_save used the unsanitized name).
        '''
        return filename.replace(" ", "_") + '.html'

    def render(self,
               data,
               chart_type,
               chart_package='corechart',
               options=None,
               div_id="chart",
               head=""):
        '''
        Render *data* into the HTML/JS template and return the markup.

        :param data: rows passed to google.visualization.arrayToDataTable
        :param chart_type: Google Charts visualization class name
        :param chart_package: Google Charts package to load
        :param options: chart options dict (JSON-serialized into the page)
        :param div_id: id of the target <div>; validated by is_valid_name
        :param head: optional extra markup prepended to the template
        :raises ValueError: if *div_id* contains characters outside
            letters, numbers, '_' and '-'
        '''
        if not self.is_valid_name(div_id):
            raise ValueError(
                "Name {} is invalid. Only letters, numbers, '_', and '-' are permitted ".format(
                    div_id))
        # The quote shuffling below turns json.dumps double quotes into single
        # quotes so the payload can live inside the single-quoted JS template.
        return Template(head + self.template).render(
            div_id=div_id.replace(" ", "_"),
            data=json.dumps(
                data, indent=4).replace("'", "\\'").replace('"', "'"),
            chart_type=chart_type,
            chart_package=chart_package,
            options=json.dumps(
                options, indent=4).replace("'", "\\'").replace('"', "'"))

    def plot_and_save(self,
                      data,
                      chart_type,
                      chart_package='corechart',
                      options=None,
                      w=800,
                      h=420,
                      filename='chart',
                      overwrite=True):
        '''
        Save the rendered html to a file and return an IFrame to display the
        plot in the notebook.
        '''
        self.save(data, chart_type, chart_package, options, filename,
                  overwrite)
        # Use the same sanitized path save() wrote to, so the IFrame always
        # points at the file that actually exists.
        return IFrame(self._html_path(filename), w, h)

    def plot(self,
             data,
             chart_type,
             chart_package='corechart',
             options=None,
             w=800,
             h=420):
        '''
        Output an iframe containing the plot in the notebook without saving.
        '''
        return HTML(
            self.iframe.format(
                source=self.render(
                    data=data,
                    options=options,
                    chart_type=chart_type,
                    chart_package=chart_package,
                    head=self.head),
                w=w,
                h=h))

    def save(self,
             data,
             chart_type,
             chart_package='corechart',
             options=None,
             filename='chart',
             overwrite=True):
        '''
        Save the rendered html to a file in the same directory as the
        notebook.

        :raises IOError: if the file exists and *overwrite* is False
        '''
        html = self.render(
            data=data,
            chart_type=chart_type,
            chart_package=chart_package,
            options=options,
            div_id=filename,
            head=self.head)
        path = self._html_path(filename)
        # Single write path: refuse only when the file exists and
        # overwriting was explicitly disabled.
        if not overwrite and os.path.exists(path):
            raise IOError('File Already Exists!')
        with open(path, 'w') as f:
            f.write(html)
| [
6738,
474,
259,
6592,
17,
1330,
37350,
198,
6738,
6101,
7535,
13,
13812,
1330,
314,
19778,
11,
11532,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
764,
8692,
62,
29487,
353,
1330,
314,
43328,
353,
628,
198,
4871,
20145,
43328,
353,
... | 1.889104 | 2,065 |
from .json_out import JSONOut

# Registry mapping each output plugin's slug to its implementing class;
# consumers look writers up here by slug.
outputs = {
    JSONOut.slug: JSONOut
}

# Public API of this package.
__all__ = [
    'JSONOut'
]
| [
6738,
764,
17752,
62,
448,
1330,
19449,
7975,
198,
198,
22915,
82,
796,
1391,
198,
220,
220,
220,
19449,
7975,
13,
6649,
1018,
25,
19449,
7975,
198,
92,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
40386,
7975,
6,
1... | 2.173913 | 46 |
# -*- coding: utf-8 -*-
import json
import os
from celery.schedules import crontab
from eventkit_cloud.celery import app
from eventkit_cloud.settings.base import is_true
from eventkit_cloud.settings.contrib import * # NOQA
# Celery config
CELERY_TRACK_STARTED = True

"""
IMPORTANT
Don't propagate exceptions in the celery chord header to the finalize task.
If exceptions are thrown in the chord header then allow the
finalize task to collect the results and update the overall run state.
"""
# CELERY_CHORD_PROPAGATES = False

# Fetch one task at a time so a worker does not hoard long-running jobs.
CELERYD_PREFETCH_MULTIPLIER = 1
CELERYBEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
CELERY_RESULT_BACKEND = os.getenv("CELERY_RESULT_BACKEND", "rpc://")
# Pickle used to be the default, and accepting pickled content is a security concern. Using the new default json,
# causes a circular reference error, that will need to be resolved.
CELERY_TASK_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ["json"]

# configure periodic task (crontab schedules; intervals partly env-driven)
BEAT_SCHEDULE = {
    "expire-runs": {"task": "Expire Runs", "schedule": crontab(minute="0", hour="0")},
    "provider-statuses": {
        "task": "Check Provider Availability",
        "schedule": crontab(minute="*/{}".format(os.getenv("PROVIDER_CHECK_INTERVAL", "30"))),
    },
    "clean-up-queues": {"task": "Clean Up Queues", "schedule": crontab(minute="0", hour="0")},
    "clear-tile-cache": {"task": "Clear Tile Cache", "schedule": crontab(minute="0", day_of_month="*/14")},
    "clear-user-sessions": {"task": "Clear User Sessions", "schedule": crontab(minute="0", day_of_month="*/2")},
    "update-statistics-cache": {
        "task": "Update Statistics Caches",
        "schedule": crontab(minute="0", day_of_month="*/4"),
    },
}

# The scale task runs every 60 seconds on its own high-priority queue.
BEAT_SCHEDULE.update(
    {
        "scale-celery": {
            "task": "Scale Celery",
            "schedule": 60.0,
            "kwargs": {"max_tasks_memory": int(os.getenv("CELERY_MAX_TASKS_MEMORY", 20000))},
            "options": {"priority": 90, "queue": "scale", "routing_key": "scale"},
        },
    }
)

CELERY_SCALE_BY_RUN = is_true(os.getenv("CELERY_SCALE_BY_RUN", False))
CELERY_GROUP_NAME = os.getenv("CELERY_GROUP_NAME", None)

app.conf.beat_schedule = BEAT_SCHEDULE

# Worker process user/group: "eventkit" by default, "vcap" on Cloud Foundry
# (VCAP_SERVICES present); both may be overridden via environment variables.
CELERYD_USER = CELERYD_GROUP = "eventkit"

if os.getenv("VCAP_SERVICES"):
    CELERYD_USER = CELERYD_GROUP = "vcap"

CELERYD_USER = os.getenv("CELERYD_USER", CELERYD_USER)
CELERYD_GROUP = os.getenv("CELERYD_GROUP", CELERYD_GROUP)

# Discover the AMQP broker URI from bound Cloud Foundry services
# ("rabbitmq" or "cloudamqp"), falling back to BROKER_URL / localhost.
# NOTE(review): the except clause only catches KeyError — an empty
# `listings` list would raise an uncaught IndexError; confirm that bound
# services always carry at least one listing.
BROKER_URL = None
if os.getenv("VCAP_SERVICES"):
    for service, listings in json.loads(os.getenv("VCAP_SERVICES")).items():
        try:
            if "rabbitmq" in service:
                BROKER_URL = listings[0]["credentials"]["protocols"]["amqp"]["uri"]
            if "cloudamqp" in service:
                BROKER_URL = listings[0]["credentials"]["uri"]
        except KeyError:
            continue
        if BROKER_URL:
            break

if not BROKER_URL:
    BROKER_URL = os.environ.get("BROKER_URL", "amqp://guest:guest@localhost:5672//")

# Same discovery for the RabbitMQ management HTTP API endpoint.
BROKER_API_URL = None
if os.getenv("VCAP_SERVICES"):
    for service, listings in json.loads(os.getenv("VCAP_SERVICES")).items():
        try:
            if "rabbitmq" in service:
                BROKER_API_URL = listings[0]["credentials"]["http_api_uri"]
            if "cloudamqp" in service:
                BROKER_API_URL = listings[0]["credentials"]["http_api_uri"]
        except KeyError:
            continue
        if BROKER_API_URL:
            break

if not BROKER_API_URL:
    BROKER_API_URL = os.environ.get("BROKER_API_URL", "http://guest:guest@localhost:15672/api/")

MAX_TASK_ATTEMPTS = int(os.getenv("MAX_TASK_ATTEMPTS", 3))

# Soft time limit for tasks; TASK_TIMEOUT=0 (the default) means no limit.
app.conf.task_soft_time_limit = int(os.getenv("TASK_TIMEOUT", 0)) or None
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
18725,
1924,
13,
1416,
704,
5028,
1330,
1067,
756,
397,
198,
198,
6738,
1785,
15813,
62,
17721,
13,
7015,
88,
1330,
598,
198,... | 2.258652 | 1,647 |
import cv2

# path to the image to display
path = r'D:\DLive\PINet\dataset\Test_images\11A00160.jpg'

# Reading an image in default mode
image = cv2.imread(path)

# cv2.imread returns None (instead of raising) when the file is missing or
# unreadable; fail fast with a clear error rather than letting imshow crash.
if image is None:
    raise FileNotFoundError("Could not read image: {}".format(path))

# Window name in which image is displayed
window_name = 'image'

# Using cv2.imshow() method
# Displaying the image
cv2.imshow(window_name, image)

# waits for user to press any key
# (this is necessary to avoid Python kernel form crashing)
cv2.waitKey(0)

# closing all open windows
cv2.destroyAllWindows()
11748,
269,
85,
17,
220,
198,
220,
220,
198,
2,
3108,
220,
198,
6978,
796,
374,
6,
35,
7479,
35,
18947,
59,
44032,
316,
59,
19608,
292,
316,
59,
14402,
62,
17566,
59,
1157,
32,
405,
14198,
13,
9479,
6,
198,
220,
220,
198,
2,
1... | 2.645714 | 175 |
from tabnanny import verbose
from embed_video.fields import EmbedVideoField
from core.utils import get_file_path
from django.db import models
from pydoc import describe
# Age-rating choices as (stored value, display label) pairs.
# Presumably the Brazilian "Classificação Indicativa" scale, where "L"
# (stored as 0) means "Livre" / general audiences — TODO confirm.
# NOTE(review): "RATTING" looks like a typo for "RATING", but the name may be
# referenced by model fields elsewhere, so it is kept as-is.
CHOICES_RATTING = [
    (0, "L"),
    (10, "10"),
    (12, "12"),
    (14, "14"),
    (16, "16"),
    (18, "18"),
]
| [
6738,
7400,
77,
7737,
1330,
15942,
577,
198,
6738,
11525,
62,
15588,
13,
25747,
1330,
13302,
276,
10798,
15878,
198,
6738,
4755,
13,
26791,
1330,
651,
62,
7753,
62,
6978,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
279,
... | 2.423729 | 118 |
#%%
import time
start = time.time()
#import gdal, ogr, osr
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from osgeo.gdalnumeric import *
from osgeo.gdalconst import *
#import numpy as np
#import scipy.ndimage as ndimage
#import pandas as pd
from subprocess import call
from itertools import compress
#import skfmm
#import stateplane
#import pylab as p
#%matplotlib inline
# Input raster (boolean inundation mask), intermediate polygon layer, and
# final KML output path.
file_bool2 = '../inun_nj/inun_bool2.tif'
file_poly2 = '../inun_nj/inun_poly2'
file_wl = '../inun_nj/wl.kml'

#%%
# Vectorize the raster into polygons (writes GML to file_poly2);
# -b 1 reads band 1, -q suppresses progress output.
call(['gdal_polygonize.py', '-nomask', file_bool2, '-b', '1', '-q', file_poly2])
#%% Polygon of wl
# Parse the GML produced by gdal_polygonize with ad-hoc string splitting.
# NOTE(review): this text surgery is fragile (it assumes exact tag spelling
# and ordering); reading the layer back through ogr would be more robust.
print('Constructing inundation polygons...')
with open(file_poly2,'r') as f_poly:
    # Collapse the whole document onto one line so tag pairs can be split on.
    text_all = f_poly.read().replace('\n', '')
# DN value (inundation depth class) of each feature.
dn = []
for item in text_all.split("</ogr:DN>"):
    if "<ogr:DN>" in item:
        dn.append(item [ item.find("<ogr:DN>")+len("<ogr:DN>") : ])
dn = [int(v) for v in dn[:]]
# Outer-ring coordinate strings, one per feature.
outer_block = []
for item in text_all.split("</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs>"):
    if "<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>" in item:
        outer_block.append(item [ item.find("<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>")+
                                  len("<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>") : ])
# Parse "lon,lat lon,lat ..." into nested float lists:
# outer[feature][vertex] == [lon, lat].
outer = [[[float(v6) for v6 in v5] for v5 in v4] for v4 in
         [[v3.split(',') for v3 in v2] for v2 in
          [v.split(' ') for v in outer_block]]]
# Split the document per feature so inner rings (holes) can be matched to
# their owning polygon.
fm = []
for item in text_all.split("</gml:featureMember>"):
    if "<gml:featureMember>" in item:
        fm.append(item [ item.find("<gml:featureMember>")+len("<gml:featureMember>") : ])
# inner[feature] is a (possibly empty) list of hole rings; inner_count keeps
# the number of holes per feature.
inner = []
inner_count = []
for i in range(len(fm)):
    inner_block = []
    for item in fm[i].split("</gml:coordinates></gml:LinearRing></gml:innerBoundaryIs>"):
        if "<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>" in item:
            inner_block.append(item [ item.find("<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>")+
                                      len("<gml:innerBoundaryIs><gml:LinearRing><gml:coordinates>") : ])
    if not inner_block:
        inner.append([])
        inner_count.append(0)
    else:
        inner.append([[[float(v6) for v6 in v5] for v5 in v4] for v4 in
                      [[v3.split(',') for v3 in v2] for v2 in
                       [v.split(' ') for v in inner_block]]])
        inner_count.append(len(inner[-1]))
# -----------------------------------------------------------------------------
# Split the polygons by inundation depth class (DN value 1..5).
# The original code repeated this selection five times — and computed the
# DN=3 selection twice, a copy/paste slip.  A single helper removes the
# duplication while binding the same fifteen names that the KML-writing
# section below consumes.
# -----------------------------------------------------------------------------
def select_dn_class(level):
    """Return (outer rings, inner rings, inner-ring counts) for one DN class."""
    mask = [v == level for v in dn]
    return (list(compress(outer, mask)),
            list(compress(inner, mask)),
            list(compress(inner_count, mask)))

outer1, inner1, inner_count1 = select_dn_class(1)
outer2, inner2, inner_count2 = select_dn_class(2)
outer3, inner3, inner_count3 = select_dn_class(3)
outer4, inner4, inner_count4 = select_dn_class(4)
outer5, inner5, inner_count5 = select_dn_class(5)
# KML colors are aabbggrr hex strings (alpha, blue, green, red).
c_empty = '00000000'  # fully transparent — used for the polygon outlines
c_1 = 'AB00FF00'      # fill color for DN class 1
c_2 = 'AB00FFFF'      # fill color for DN class 2
c_3 = 'AB0080FF'      # fill color for DN class 3
c_4 = 'AB0000FF'      # fill color for DN class 4
c_5 = 'ABCC00CC'      # fill color for DN class 5

# Document header.  (The dead `s = []` that preceded this assignment in the
# original was removed — it was immediately overwritten.)
# NOTE(review): title_str is not defined anywhere in this script; it must be
# injected before this point (e.g. by an earlier notebook cell) — confirm.
s = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
    <Document>
        <name>{title}</name>""".format(title=title_str)

# One <Style> block per inundation class; the five blocks differ only in the
# style id and fill color, so emit them from a single template.
for sid, fill in (('s_1', c_1), ('s_2', c_2), ('s_3', c_3),
                  ('s_4', c_4), ('s_5', c_5)):
    s += """
    <Style id="{sid}">
        <LineStyle>
            <color>{c0}</color>
            <width>0</width>
        </LineStyle>
        <PolyStyle>
            <color>{c}</color>
        </PolyStyle>
    </Style>""".format(sid=sid, c=fill, c0=c_empty)
# -----------------------------------------------------------------------------
# Emit one <Placemark> per polygon.  The original code repeated this writer
# five times (once per DN class); the only differences between the copies were
# the (outerN, innerN, inner_countN) triple and the #s_N style id, so a single
# loop over the five classes produces the same KML.
# -----------------------------------------------------------------------------
dn_classes = ((outer1, inner1, inner_count1, 's_1'),
              (outer2, inner2, inner_count2, 's_2'),
              (outer3, inner3, inner_count3, 's_3'),
              (outer4, inner4, inner_count4, 's_4'),
              (outer5, inner5, inner_count5, 's_5'))
for outer_k, inner_k, inner_count_k, sid in dn_classes:
    for i in range(len(outer_k)):
        # Placemark header and outer boundary ring.
        s += """
    <Placemark>
        <name>{id:d}</name>
        <visibility>1</visibility>
        <styleUrl>#{sid}</styleUrl>
        <Polygon>
            <extrude>0</extrude>
            <tessellate>1</tessellate>
            <altitudeMode>clampToGround</altitudeMode>
            <outerBoundaryIs>
                <LinearRing>
                    <coordinates>""".format(id=i, sid=sid)
        for ii in range(len(outer_k[i])):
            s += """
                        {lon:.15f},{lat:.15f}""".format(lon=outer_k[i][ii][0], lat=outer_k[i][ii][1])
        s += """
                    </coordinates>
                </LinearRing>
            </outerBoundaryIs>"""
        # Holes, if any, as inner boundary rings.
        if inner_count_k[i] > 0:
            for ii in range(inner_count_k[i]):
                s += """
            <innerBoundaryIs>
                <LinearRing>
                    <coordinates>"""
                for iii in range(len(inner_k[i][ii])):
                    s += """
                        {lon:.15f},{lat:.15f}""".format(lon=inner_k[i][ii][iii][0], lat=inner_k[i][ii][iii][1])
                s += """
                    </coordinates>
                </LinearRing>
            </innerBoundaryIs>"""
        s += """
        </Polygon>
    </Placemark>"""
# Close the <Document>/<kml> elements and write the assembled KML file.
s += """
    </Document>
</kml>"""

with open(file_wl,'w') as f_kml:
    # write(), not writelines(): `s` is a single string, and writelines()
    # would iterate it character by character.
    f_kml.write(s)

#%%
# Report total elapsed wall-clock time for the script.
end = time.time()
print(end - start)
| [
2,
16626,
198,
11748,
640,
198,
9688,
796,
640,
13,
2435,
3419,
198,
198,
2,
11748,
308,
31748,
11,
267,
2164,
11,
267,
27891,
198,
6738,
28686,
469,
78,
1330,
308,
31748,
198,
6738,
28686,
469,
78,
1330,
267,
2164,
198,
6738,
28686... | 2.047566 | 4,478 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 23:32:28 2020
@author: cjburke
Solve the rubik's cube with a Iterative Deepening Depth First Search
With a precalculated pattern database of minimum length to solve
for various configurations.
This borrows heavily from the excellent Blog piece and code by
Benjamin Botto https://github.com/benbotto
https://medium.com/@benjamin.botto/implementing-an-optimal-rubiks-cube-solver-using-korf-s-algorithm-bf750b332cf9
Definitely read the blog about this before you dive into the code and comments
Hereafter in the comments I will refer to this blog post as BottoB
Note: The python code here is not used for finding the solution
It is here because it was prototyped in python first and it is useful
to have the functions to perform moves ahead of time.
Thus, many of these functions have nearly identical surrogates in the
cython code as well.
"""
from multiprocessing import Pool, RawArray, cpu_count
import numpy as np
import rubik_cython_roll_buffdq_solve as rcm
import rubik_cython_roll_buffdq_solve_MP as rcmMP
import copy
import lehmer_code as lc
from collections import deque as dq
from timeit import default_timer as timer
# This is the worker/child that will perform
# the search from the initial cube configuration its given
# module level pointers for pattern db
# NOTE(review): the worker function `child` and the `log_quitter` callback
# used by the multiprocessing section below are not defined in this excerpt;
# confirm they exist in the full source before running.
patternDB_Storage=[]
# start time to keep track of elapsed run time
startts = timer()

# The method for terminating the worker children processes
# after one of them finds a solution is from
# https://stackoverflow.com/questions/36962462/terminate-a-python-multiprocessing-program-once-a-one-of-its-workers-meets-a-cer
# https://stackoverflow.com/questions/34827250/how-to-keep-track-of-status-with-multiprocessing-and-pool-map

if __name__ == '__main__':
    # Set N cores you want to use
    # default is all of them found by multiprocessing.cpu_count()
    print('Found {0:d} CPUS'.format(cpu_count()))
    USENCPUS = cpu_count()
    # See the README.md for the nomenclature for entering the scrambled
    # cube that you want to solve. solvedfaces is the solved cube
    # This veriable isn't used it is just here for reference
    # These are the integers that correspond to which color
    # 1 - orange, 2 - blue, 3 - yellow, 4 - green, 5 - white, 6 - red
    solvedfaces = {"01my":5, "01mz":6, "01mx":4, "02my":5, "02mz":6,"03my":5, "03px":2, "03mz":6,\
                   "04mx":4, "04mz":6,"05mz":6,"06px":2, "06mz":6,\
                   "07mx":4, "07py":3, "07mz":6,"08py":3, "08mz":6,"09px":2, "09py":3, "09mz":6,\
                   "10mx":4, "10my":5,"11my":5,"12px":2, "12my":5,\
                   "13mx":4,"15px":2,\
                   "16mx":4, "16py":3,"17py":3,"18px":2, "18py":3,\
                   "19mx":4, "19my":5, "19pz":1,"20my":5, "20pz":1,"21px":2, "21my":5, "21pz":1,\
                   "22mx":4, "22pz":1,"23pz":1,"24px":2, "24pz":1,\
                   "25mx":4, "25py":3, "25pz":1,"26py":3, "26pz":1,"27px":2, "27py":3, "27pz":1}
    # HERE is where you put the cube you want to solve in begcubefaces dictionary
    # The standard is to have the white center cube face you, orange center
    # cube on top and blue center cube to the right
    # 15 turns
    begcubefaces = {"01my":6, "01mz":5, "01mx":2, "02my":6, "02mz":4,"03my":3, "03px":6, "03mz":2,\
                   "04mx":2, "04mz":5,"05mz":6,"06px":6, "06mz":2,\
                   "07mx":5, "07py":2, "07mz":1,"08py":6, "08mz":3,"09px":1, "09py":3, "09mz":2,\
                   "10mx":3, "10my":4,"11my":5,"12px":3, "12my":1,\
                   "13mx":4,"15px":2,\
                   "16mx":2, "16py":1,"17py":3,"18px":6, "18py":5,\
                   "19mx":6, "19my":5, "19pz":4,"20my":1, "20pz":5,"21px":5, "21my":4, "21pz":1,\
                   "22mx":1, "22pz":4,"23pz":1,"24px":2, "24pz":3,\
                   "25mx":6, "25py":3, "25pz":4,"26py":5, "26pz":4,"27px":4, "27py":1, "27pz":3}
    # empty template to use for filling in your own cube face colors/numbers
    # begcubefaces = {"01my":, "01mz":, "01mx":, "02my":, "02mz":,"03my":, "03px":, "03mz":,\
    #            "04mx":, "04mz":,"05mz":6,"06px":, "06mz":,\
    #            "07mx":, "07py":, "07mz":,"08py":, "08mz":,"09px":, "09py":, "09mz":,\
    #            "10mx":, "10my":,"11my":5,"12px":, "12my":,\
    #            "13mx":4,"15px":2,\
    #            "16mx":, "16py":,"17py":3,"18px":, "18py":,\
    #            "19mx":, "19my":, "19pz":,"20my":, "20pz":,"21px":, "21my":, "21pz":,\
    #            "22mx":, "22pz":,"23pz":1,"24px":, "24pz":,\
    #            "25mx":, "25py":, "25pz":,"26py":, "26pz":,"27px":, "27py":, "27pz":}
    # This is the internal integer name for a move with my non-standard
    # character code for the move. See README.md for description
    # rotnames = {0:"DR", 1:"DL", 2:"DH",\
    #             3:"UR", 4:"UL", 5:"UH",\
    #             6:"RU", 7:"RD", 8:"RH",\
    #             9:"LU", 10:"LD", 11:"LH",\
    #             12:"FC", 13:"FG", 14:"FH",\
    #             15:"BC", 16:"BG", 17:"BH"}

    # initialize cube
    bcube = rubiks_cube()
    # Read in the cube color dictionary and convert it to the
    # id and orientation for the cubies that are used internally
    init_faceids = bcube.get_start_faceids(begcubefaces).tolist()
    # if you want to hard code the internal cubie ids generated by
    # rubik_cube_debugpath_roll.py you can enter it here
    # to bypass what is in the color dictionary
    # init_faceids = [9, 32, 29, 68, 13, 41, 24, 76, 25, 40, 12, 61, 21, 64, 1, 52, 2, 65, 20, 72, 6, 36, 18, 44, 16, 37, 5, 56, 30, 33, 8, 49, 26, 53, 0, 45, 17, 48, 10, 77, 28, 57, 4, 73, 22, 60, 14, 69]

    print('Start Loading Pattern DBs')
    # Load the corner config to solve turns DB
    with np.load('rubik_corner_db.npz') as data:
        cornerDB = data['db']
    # Fix -1 score for solved state
    idx = np.argmin(cornerDB)
    cornerDB[idx] = 0
    # Load the edge config to solve turns DB
    with np.load('rubik_alledge_db.npz') as data:
        edgeDB = data['db']
    # Fix -1 score for solved state
    idx = np.argmin(edgeDB)
    edgeDB[idx] = 0
    # Load the edge1 config to solve turns DB
    with np.load('rubik_edge1_DFS_12p7_db.npz') as data:
        edge1DB = data['db']
    # Fix -1 score for solved state
    idx = np.argmin(edge1DB)
    edge1DB[idx] = 0
    # Load the edge2 config to solve turns DB
    with np.load('rubik_edge2_DFS_12p7_db.npz') as data:
        edge2DB = data['db']
    # Fix -1 score for solved state
    idx = np.argmin(edge2DB)
    edge2DB[idx] = 0

    # Make shared raw arrays of pattern databases
    print('Start making DBs shared')
    # Note that the 'h' designation in RawArray was used because
    # 'i' was too big for the np.int16.However, multiple times reading
    # the documentation seemed to me that 'i' should have worked for int16 as well (2 bytes), but it didn't
    # this disagreement in element size may break on other computers or implementations.
    # make shared db storage
    cshDB = RawArray('h', cornerDB.shape[0])
    # make the numpy wrapper to this buffer
    cshDB_np = np.frombuffer(cshDB, dtype=np.int16)
    # now copy data into the shared storage
    np.copyto(cshDB_np, cornerDB.astype(np.int16))
    # repeat for other databases
    eshDB = RawArray('h', edgeDB.shape[0])
    eshDB_np = np.frombuffer(eshDB, dtype=np.int16)
    np.copyto(eshDB_np, edgeDB.astype(np.int16))
    e1shDB = RawArray('h', edge1DB.shape[0])
    e1shDB_np = np.frombuffer(e1shDB, dtype=np.int16)
    np.copyto(e1shDB_np, edge1DB.astype(np.int16))
    e2shDB = RawArray('h', edge2DB.shape[0])
    e2shDB_np = np.frombuffer(e2shDB, dtype=np.int16)
    np.copyto(e2shDB_np, edge2DB.astype(np.int16))
    patternDB_Storage.extend([cshDB, cornerDB.shape[0], \
                              eshDB, edgeDB.shape[0],\
                              e1shDB, edge1DB.shape[0],\
                              e2shDB, edge2DB.shape[0]])
    print('Done copying pattern db to shared memory')
    print('Elapsed time for setup (s) {0:.1f}'.format(timer()-startts))

    # Calculate the Lehmer Get the initial cube distance
    lehcode = lc.lehmer_code(8)
    statecode = bcube.getstate(np.array(init_faceids), lehcode)
    edge_lehcode = lc.lehmer_code(12)
    edge_statecode = bcube.getstate_edge(np.array(init_faceids), edge_lehcode)
    edge1_lehcode = lc.lehmer_code(12, 7)
    edge1_statecode = bcube.getstate_edgesplit(np.array(init_faceids), edge1_lehcode, 0)
    edge2_lehcode = lc.lehmer_code(12, 7)
    edge2_statecode = bcube.getstate_edgesplit(np.array(init_faceids), edge2_lehcode, 1)
    print(statecode, edge_statecode, edge1_statecode, edge2_statecode)
    # Based on the lehmer code look up the moves until end for each database
    cs = cornerDB[statecode]
    ce = edgeDB[edge_statecode]
    ce1 = edge1DB[edge1_statecode]
    ce2 = edge2DB[edge2_statecode]
    # The admissible heuristic is the max over the pattern databases.
    score = np.max([cs, ce, ce1, ce2])
    print('Max & Initial Scores')
    print(score, cs, ce, ce1, ce2)
    # Since cubes are always solvable in <=20 moves
    # this is the largets depth from the initial score we need to explore
    largest_MAXDELDEP = 20 - score
    retval = 0
    # The first few are so quick that don't bother with MP
    # Here is where we call the Iterative Depth Depth First search
    # MAXDELDEP sets the maximum depth we search each iteration
    # the first few are single core.
    for MAXDELDEP in np.arange(0,5):
        useMaxLevel = MAXDELDEP +score
        if not retval == 2: # Found solution yet?
            print('Trying MAXDELDEP {0:d} MaxLevel: {1:d}'.format(MAXDELDEP, useMaxLevel))
            # Call the DFS cython that does all the work to MAXDELDEP
            retval = rcm.DFS_cython_solve(bytes(init_faceids), MAXDELDEP, cornerDB, edgeDB, edge1DB, edge2DB)

    print('Now Trying MP for larger rounds')
    # Save the cube states after the first set of turns
    # newmoves holds the facieds after the first 18 turns
    newmoves = bcube.make_pathlist(init_faceids, 18)
    # go to another level 2 turns starting from the first turns
    newmoves2 = []
    for i in range(18):
        new_faceids = newmoves[i][0]
        exmoves = bcube.make_pathlist(new_faceids, i)
        # we need to keep track of the two moves we do this by
        # Adjusting the move number to put the second move
        # by multiplying second move by 100 and adding to first move
        for ex in exmoves:
            curmv = ex[1]*100
            ex[1] = curmv +i
            newmoves2.append(ex)
    print("Got {0:d} number of moves after 2nd level".format(len(newmoves2)))

    # Here is where we go to even deeper IDDFS searches but using Multiprocessing
    for MAXDELDEP in np.arange(5,largest_MAXDELDEP+1):
        useMaxLevel = useMaxLevel + 1
        if not retval == 2: #Found Solution yet?
            print('Trying MAXDELDEP {0:d} MaxLevel:{1:d} with MP'.format(MAXDELDEP, useMaxLevel))
            # pack the worker arguments
            # Each entry: [combined first-two-move code, depth cap, level] + faceids.
            work_args = []
            for i, curnewmoves in enumerate(newmoves2):
                curlevel = 3
                cmv = curnewmoves[1]
                holdlist = [cmv, useMaxLevel, curlevel]
                holdlist.extend(curnewmoves[0])
                work_args.append(holdlist)
            # Have all the worker arguments loaded
            # initialize the pool of workes
            pmp = Pool(processes = USENCPUS)
            results = [] # This will store results
            # This gets populated in log_quitter() callback function
            # callback is in scope of main so it is visible
            # NOTE(review): log_quitter/child are not defined in this excerpt.
            # Fill the wokeres with all the jobs
            for i in range(len(work_args)):
                pmp.apply_async(child, args=(work_args[i],), callback=log_quitter)
            # close the pool for any future jobs
            pmp.close()
            # Block until all the workers finished or are terminated
            pmp.join()
            # Go through the results list to see if any workers found a solution
            fndSoln = False
            for rr in results:
                if rr == 2: # Worker finds solution!
                    fndSoln = True
            if fndSoln:
                retval = 2 # This terminates going to higher levels in the IDFFS search
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
4280,
2242,
2242,
25,
2624,
25,
2078,
12131,
198,
198,
31,
9800,
25,
269,
73,
6236,... | 2.209236 | 5,587 |
"""
"""
import sys
from functools import wraps
import logging
from six import with_metaclass
from .errors import ResourceExistsError
# Module-level logger for the "nar" package.
logger = logging.getLogger("nar")

# Global name -> class registry; presumably populated by the Registry
# metaclass below -- the registration code is not visible in this chunk.
registry = {}

# todo: add namespaces to avoid name clashes, e.g. "Person" exists in several namespaces
class Registry(type):
    """Metaclass for registering Knowledge Graph classes"""
    # NOTE(review): no __init__/__new__ hook is visible here, so as written
    # nothing is actually added to the module-level `registry` dict by this
    # metaclass -- the body may have been stripped; confirm against upstream.
#class KGObject(object, metaclass=Registry):
class KGObject(with_metaclass(Registry, object)):
    """Base class for Knowledge Graph objects"""
    # Shared id -> instance cache used by all KGObject subclasses.
    cache = {}

    # NOTE(review): two additional classmethod definitions appear to have been
    # stripped here, leaving three stacked bare @classmethod decorators on
    # list() -- confirm against the upstream source; as written this is
    # classmethod(classmethod(classmethod(list))).
    @classmethod
    @classmethod
    @classmethod
    def list(cls, client, size=100, **filters):
        """List all objects of this type in the Knowledge Graph"""
        # NOTE(review): `filters` is accepted but not forwarded to the client.
        return client.list(cls, size=size)

    def exists(self, client):
        """Check if this object already exists in the KnowledgeGraph"""
        # Note that this default implementation should in
        # many cases be over-ridden.
        if self.id:
            return True
        else:
            # NOTE(review): the trailing comma makes `context` a 1-tuple
            # containing the dict, not a dict -- looks unintentional; verify
            # what client.filter_query() expects.
            context = {"schema": "http://schema.org/"},
            query_filter = {
                "path": "schema:name",
                "op": "eq",
                "value": self.name
            }
            response = client.filter_query(self.path, query_filter, context)
            if response:
                # Adopt the id of the first match.
                self.id = response[0].data["@id"]
            return bool(response)

    def _save(self, data, client, exists_ok=True):
        """Update this object in place if it already has an id; otherwise
        create a new instance, unless one already exists remotely (in which
        case either return quietly or raise, per exists_ok)."""
        if self.id:
            # instance.data should be identical to data at this point
            self.instance = client.update_instance(self.instance)
            logger.info("Updating {self.instance.id}".format(self=self))
        else:
            if self.exists(client):
                if exists_ok:
                    logger.info("Not updating {self.__class__.__name__}, already exists (id={self.id})".format(self=self))
                    return
                else:
                    raise ResourceExistsError("Already exists in the Knowledge Graph: {self!r}".format(self=self))
            instance = client.create_new_instance(self.__class__.path, data)
            self.id = instance.data["@id"]
            self.instance = instance
            # Register the freshly created object in the shared cache.
            KGObject.cache[self.id] = self
@property
class KGProxy(object):
    """Lazy reference to a KGObject, resolved on demand via its URI."""

    # NOTE(review): the body of this @property appears to have been stripped;
    # as written it decorates resolve(), which takes an argument and is
    # clearly meant to be called as a method -- confirm against upstream.
    @property
    def resolve(self, client):
        """Fetch the referenced object, consulting the shared KGObject cache
        first and populating it on a miss."""
        if self.id in KGObject.cache:
            return KGObject.cache[self.id]
        else:
            obj = self.cls.from_uri(self.id, client)
            KGObject.cache[self.id] = obj
            return obj
class KGQuery(object):
    """Represents a query against the Knowledge Graph."""
    # NOTE(review): no body beyond the docstring is visible in this chunk --
    # the implementation may have been stripped.
| [
37811,
198,
198,
37811,
198,
198,
11748,
25064,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
11748,
18931,
198,
6738,
2237,
1330,
351,
62,
4164,
330,
31172,
198,
6738,
764,
48277,
1330,
20857,
3109,
1023,
12331,
628,
198,
6404,
1362,
... | 2.283737 | 1,156 |
import datetime
| [
11748,
4818,
8079,
628
] | 4.25 | 4 |
"""Configuration store, retrieval, validation, and supporting functionality."""
import configparser
import logging
import os
from .general import CHECKSUM_TAG_PREFIX
from .general import CHECKSUM_TAG_PREFIX_NORMALIZED
from .general import SyncError
# Default directory holding the config file and cached OAuth tokens.
DEFAULT_CONFIG_DIR = '~/.config/flickrsyncr'
# Config-file section that settings are read from.
DEFAULT_SECTION_NAME = 'DEFAULT'
# Name of the config file inside the config directory.
CONFIG_FILENAME = 'config'

__all__ = ['Config', 'loadConfigStore']

logger = logging.getLogger(__name__)
class Config():
    """Config for input to flickrsyncr.sync().

    Args:
        album: Name of Flickr album
        path: Local path for photos
        api_key: Flickr API key. (Required in Config() or in the config file.)
        api_secret: Flickr API secret. (Required in Config() or in the config file.)
        dir_: Dir with config file and local OAuth tokens.
        push: Local is the source, Flickr album is the destination. (aka, upload)
        pull: Flickr album is the source, local is the destination. (aka, download)
        sync: Remove all photos at the destination that aren't in the source. (Optional)
        tag: Ignore Flickr photos without this tag. Uploaded photos will get the tag. (Optional)
        checksum: Store the file's checksum on Flickr, use it to detect edits. (Optional)
        dryrun: Don't make any modifications to photos, locally or on Flickr. (Optional)
        store: Supports .get(setting_name) for reading config values.
    """
    # NOTE(review): no __init__ is visible in this chunk; the attributes read
    # below (api_key, api_secret, dir_, push, pull, sync, tag, checksum) are
    # presumably assigned by a constructor defined elsewhere -- confirm.

    def fillFromStore(self, store):
        """Adds config settings from the config store, eg. a file. Only imports settings from
        config store that are a) necessary and b) not explicitly provided. Throws a SyncError
        if a required parameter can't be found in config.

        Args:
            store: A config store obtained from loadConfigStore().
        """
        if not self.api_key:
            logger.info('Filling setting "api_key" config store.')
            self.api_key = self._loadSetting(store, 'api_key')
        if not self.api_secret:
            logger.info('Filling setting "api_secret" config store.')
            self.api_secret = self._loadSetting(store, 'api_secret')

    def _loadSetting(self, store, setting_name):
        """Load a setting from config store. Throws a SyncError if it isn't found.

        Args:
            store: A config store obtained from loadConfigStore().
            setting_name: Name of the setting in the DEFAULT section.
        Returns:
            The setting's value.
        Raises:
            SyncError: If the section or the setting does not exist.
        """
        try:
            setting_val = store.get(DEFAULT_SECTION_NAME, setting_name)
        except configparser.NoSectionError as e:
            # The section doesn't exist at all.
            raise SyncError('No config section "{}": error={}'.format(DEFAULT_SECTION_NAME, e))
        except configparser.NoOptionError as e:
            # A setting with that name doesn't exist.
            # BUGFIX: previously re-raised SyncError with no message, unlike
            # the NoSectionError branch above; report the missing setting.
            raise SyncError('No config setting "{}": error={}'.format(setting_name, e))
        return setting_val

    def validate(self):
        """Validates that the Config's existing combination of settings is valid."""
        # The Flickr API key and secret must be specified.
        if not self.api_key:
            raise SyncError('api_key must be provided, but it was not. Get one from ' +
                    'http://www.flickr.com/services/api/keys/ .')
        if not self.api_secret:
            raise SyncError('api_secret must be provided, but it was not. Get one from ' +
                    'http://www.flickr.com/services/api/keys/ .')
        # The config dir must be specified.
        if not self.dir_:
            raise SyncError('dir_ must be specified, but it was not.')
        # User must specify at least --push or --pull.
        if not self.push and not self.pull:
            raise SyncError('Choose at least one action between --push or --pull. ' +
                    'What was set: push={}, pull={}'.format(self.push, self.pull))
        # User can both push and pull, but pruning as well is logically useless: There's nothing
        # to prune.
        if self.push and self.pull and self.sync:
            # BUGFIX: added missing space before 'What was set' so the message
            # does not render as "them.What was set".
            raise SyncError(('Specifying --push and --pull and --sync all together makes no ' +
                    'sense, nothing to remove. Choose at most two of them. ' +
                    'What was set: push={}, pull={}, sync={}').format(
                    self.push, self.pull, self.sync))
        # User can both push and pull, but validating checksums at the same time doesn't make
        # sense: Which side wins if the checksum doesn't match?
        if self.push and self.pull and self.checksum:
            raise SyncError(('Specifying --push and --pull and --checksum all together makes no ' +
                    'sense, which side\'s checksum is right? Choose at most two of them. ' +
                    'What was set: push={}, pull={}, checksum={}').format(
                    self.push, self.pull, self.checksum))
        # Don't let the custom tag start with the checksum tag's prefix, it will confuse checksum
        # syncing logic.
        if self.tag and (self.tag.startswith(CHECKSUM_TAG_PREFIX) or self.tag.startswith(
                CHECKSUM_TAG_PREFIX_NORMALIZED)):
            # BUGFIX: .format() previously bound only to the final concatenated
            # literal (which has no placeholders), so the "{}" markers were
            # emitted verbatim; format the fully concatenated message instead.
            raise SyncError(('Tag name "{}" overlaps with the checksum tag "{}", this would ' +
                    'cause problems during checksum validation.').format(
                    self.tag, CHECKSUM_TAG_PREFIX))
        # The only whitespace used is the standard space. Don't know how Flickr would treat other
        # whitespace in tag names.
        if self.tag and ' ' in self.tag:
            raise SyncError('Do not put spaces in tags.')
def loadConfigStore(config_dir=''):
    """Provides a reader for config file. If config_dir is empty, uses a default.

    Args:
        config_dir: Directory containing the config file; '' selects
            DEFAULT_CONFIG_DIR.
    Returns:
        A configparser.ConfigParser loaded from the config file.
    Raises:
        SyncError: If no config file exists at the computed path.
    """
    dir_path = os.path.expanduser(config_dir if config_dir else DEFAULT_CONFIG_DIR)
    file_path = os.path.join(dir_path, CONFIG_FILENAME)
    if not os.path.exists(file_path):
        raise SyncError("Can't load config from path {}, file doesn't exist".format(file_path))
    # CONSISTENCY FIX: use the module-level logger (as the rest of this
    # module does) instead of the root logger via logging.info().
    logger.info('Reading config from path={}'.format(file_path))
    config = configparser.ConfigParser()
    config.read(file_path)
    return config
| [
37811,
38149,
3650,
11,
45069,
11,
21201,
11,
290,
6493,
11244,
526,
15931,
198,
11748,
4566,
48610,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
764,
24622,
1330,
5870,
25171,
50,
5883,
62,
42197,
62,
47,
31688,
10426,
198,
67... | 2.606593 | 2,275 |
##
# Copyright (c) 2015-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from cStringIO import StringIO
from collections import OrderedDict
from json.encoder import encode_basestring
from plistlib import PlistWriter, _escapeAndEncode
import json
import os
import re
import textwrap
DEBUG = False
COPYRIGHT = """
<!--
Copyright (c) 2006-2017 Apple Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
"""
def parseConfigItem(item):
    """
    Read the definition of a "DEFAULT_*" value from the stdconfig.py file so
    that we get the literal Python source as a L{str} that is then processed
    into JSON.

    @param item: the "DEFAULT_*" item to read
    @type item: L{str}

    @return: the "DEFAULT_*" value
    @rtype: L{str}
    """
    marker = "{} = {{\n".format(item)
    with open(os.path.join(os.path.dirname(__file__), "stdconfig.py")) as f:
        # Consume lines until the "ITEM = {" marker line is read.
        for _ in iter(f.readline, marker):
            pass
        # Collect everything up to the closing "}" line, wrapping the body
        # so it looks like a JSON object.
        body = ["{"]
        for line in iter(f.readline, "}\n"):
            body.append(line[:-1])
        body.append("}")
    return "\n".join(body)
def processConfig(configlines, with_comments=False, verbose=False, substitutions=None):
    """
    Process the "raw" config lines from stdconfig.py into a JSON object
    (a Python L{dict}) that is ordered and contains commentary based on
    the Python comments.

    @param configlines: config data lines
    @type configlines: L{str} (newline-separated; split via splitlines())
    @param with_comments: whether to include comments or not
    @type with_comments: L{bool}
    @param verbose: print out intermediate state
    @type verbose: L{bool}
    @param substitutions: optional mapping of item name (e.g.
        "DEFAULT_SERVICE_PARAMS") to a dict of values; references to those
        items in the config are replaced by their JSON-serialized value
    @type substitutions: L{dict} or C{None}

    @return: the serialized JSON object
    @rtype: L{OrderedDict}
    """
    # Comments will either be "block" (as in section dividers) or "inline"
    # (as in appended to the end of the line). We treat these slightly
    # differently wrt to whitespace and where they appear.
    lines = []
    ctr = 0
    block_comment = []
    inline_comment = []

    # Regular expression to match an inline comment and a
    # value containing a numeric expression that needs to be
    # evaluated (e.g. "60 * 60")
    comments = re.compile("([ ]*.*?,?)[ ]*#[ ]*(.*)[ ]*$")
    value = re.compile("([^:]+:[ ]+)([0-9 \*]+)(.*)")

    for line in configlines.splitlines():
        if line.strip() and line.strip()[0] == "#":
            # Line with just a comment is a block comment unless the
            # previous comment was inline (in which case it is a multi-line
            # inline). Aggregate block and inline comments into one overall
            # comment.
            comment = line.strip()[1:].strip()
            if len(comment) == 0 and len(block_comment) == 0 and len(inline_comment) == 0:
                pass
            elif inline_comment:
                inline_comment.append(comment if comment else "\n")
            else:
                block_comment.append(comment if comment else "\n")
            continue
        elif block_comment:
            # Generate a block comment JSON member
            if with_comments:
                comment_type = "comment_" if line.strip() and block_comment[-1] != "\n" else "section_"
                while block_comment[-1] == "\n":
                    block_comment.pop()
                lines.append("\"{}{}\": {},".format(comment_type, ctr, encode_basestring(" ".join(block_comment))))
                ctr += 1
            block_comment = []
        elif inline_comment:
            # Generate an inline comment JSON member
            if with_comments:
                # Insert before the line the comment was attached to.
                lines.insert(-1, "\"comment_{}\": {},".format(ctr, encode_basestring(" ".join(inline_comment))))
                ctr += 1
            inline_comment = []

        # Check if the current line contains an inline comment, if so extract
        # the comment and add to the current inline comments list
        m = comments.match(line)
        if m:
            inline_comment.append(m.group(2))
            append = m.group(1)
        else:
            append = line

        # Do some simple value conversions
        append = append.rstrip().replace(" None", ' ""').replace(" True", " true").replace(" False", " false").replace("\\", "\\\\")

        # Look for special substitutions
        if substitutions:
            for subskey in substitutions.keys():
                pos = append.find(subskey)
                if pos >= 0:
                    # The value name follows the item key; strip a trailing
                    # comma (remembering it) and the surrounding quoting.
                    actual = append[pos + len(subskey) + 2:]
                    comma = ""
                    if actual[-1] == ",":
                        actual = actual[:-1]
                        comma = ","
                    actual = actual[:-2]
                    append = "{}{}{}".format(
                        append[:pos],
                        json.dumps(substitutions[subskey][actual]),
                        comma,
                    )
                    break

        # Look for numeric expressions in the value and eval() those to get a value
        # that is compatible with JSON
        # NOTE(review): eval() is applied to digit/space/'*' text only (per the
        # regex), and the input is this project's own stdconfig.py, not
        # untrusted data.
        m = value.match(append)
        if m:
            expression = eval(m.group(2))
            append = "{}{}{}".format(m.group(1), expression, m.group(3))

        # Remove trailing commas for the last items in an array
        # or object as JSON does not like that
        if append.strip() and append.strip()[0] in ("]", "}"):
            if lines[-1][-1] == ",":
                lines[-1] = lines[-1][:-1]

        # Line is ready to use
        lines.append(append)

    newj = "\n".join(lines)
    if verbose:
        print(newj)

    # Created an ordered JSON object
    j = json.loads(newj, object_pairs_hook=OrderedDict)
    return j
class OrderedPlistWriter(PlistWriter):
    """
    L{PlistWriter} that maintains the order of dict items. It also handles special keys
    "section_" and "comment_" which are used to insert XML comments in the plist output.
    Some additional blank lines are also added for readability of the plist.
    """

    def writeDict(self, d):
        """
        Basically a copy of L{PlistWriter.writeDict} that does not sort the dict keys
        if the dict type is L{OrderedDict}.
        """
        # NOTE(review): this relies on Python 2 semantics -- `unicode`,
        # list-returning dict.items(), and the (removed-in-3.x)
        # plistlib.PlistWriter; writeComment is assumed to be provided by the
        # writer base class -- confirm.
        self.beginElement("dict")
        items = d.items()
        if not isinstance(d, OrderedDict):
            # Plain dicts keep plistlib's default behaviour: sorted keys.
            items.sort()
        newline = False
        for key, value in items:
            if not isinstance(key, (str, unicode)):
                raise TypeError("keys must be strings")
            if newline:
                # Blank line between entries for readability.
                self.writeln("")
            if key.startswith("section_"):
                # Section divider: emitted as an XML comment, followed by a
                # blank line before the next entry.
                self.writeComment(value)
                newline = True
            elif key.startswith("comment_"):
                # Inline comment: emitted as an XML comment attached directly
                # to the following key (no blank line in between).
                self.writeComment(value)
                newline = False
            else:
                self.simpleElement("key", key)
                self.writeValue(value)
                newline = True
        self.endElement("dict")
def writeOrderedPlist(rootObject, pathOrFile):
    """
    Write 'rootObject' to a .plist file using an L{OrderedPlistWriter} so
    that dict ordering is preserved. 'pathOrFile' may either be a file name
    or a (writable) file object; a file name is opened here and closed
    before returning.
    """
    opened_here = False
    if isinstance(pathOrFile, (str, unicode)):
        pathOrFile = open(pathOrFile, "w")
        opened_here = True
    plist_writer = OrderedPlistWriter(pathOrFile)
    plist_writer.writeln(COPYRIGHT)
    plist_writer.writeln("<plist version=\"1.0\">")
    plist_writer.writeValue(rootObject)
    plist_writer.writeln("</plist>")
    if opened_here:
        pathOrFile.close()
def writeOrderedPlistToString(rootObject):
    """
    Return 'rootObject' serialized as a plist-formatted string, preserving
    dict ordering (see L{writeOrderedPlist}).
    """
    buf = StringIO()
    writeOrderedPlist(rootObject, buf)
    return buf.getvalue()
def writeStdConfig(data):
    """
    Write the actual plist data to conf/caldavd-stdconfig.plist.

    @param data: plist data
    @type data: L{str}
    """
    target = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        "conf",
        "caldavd-stdconfig.plist",
    )
    with open(target, "w") as f:
        f.write(data)
def dumpConfig():
    """
    Dump the full stdconfig to a plist-formatted string.
    """
    # Serialized JSON objects for each *_PARAMS config item; these get
    # substituted into the main DEFAULT_CONFIG when it is processed.
    param_names = (
        "DEFAULT_SERVICE_PARAMS",
        "DEFAULT_RESOURCE_PARAMS",
        "DEFAULT_AUGMENT_PARAMS",
        "DEFAULT_DIRECTORY_ADDRESSBOOK_PARAMS",
    )
    params = {}
    for name in param_names:
        if DEBUG:
            print(name)
        params[name] = processConfig(
            parseConfigItem(name), with_comments=True, verbose=DEBUG)

    # Generate the plist for the default config, substituting the *_PARAMS.
    default_config = processConfig(
        parseConfigItem("DEFAULT_CONFIG"),
        with_comments=True,
        verbose=DEBUG,
        substitutions=params,
    )
    return writeOrderedPlistToString(default_config)
if __name__ == '__main__':
    # Generate the stdconfig plist, write it into conf/, and echo it to stdout.
    data = dumpConfig()
    writeStdConfig(data)
    print(data)
| [
2235,
198,
2,
15069,
357,
66,
8,
1853,
12,
5539,
4196,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.401029 | 4,274 |
import math

n = int(input("Enter a number: "))

# BUGFIX: factorial is undefined for negative numbers; the old manual loop
# silently printed "Factorial of -n is 1" (the range was empty).
if n < 0:
    print("Factorial is not defined for negative numbers.")
else:
    # math.factorial replaces the hand-rolled accumulation loop.
    print(f"Factorial of {n} is {math.factorial(n)}")
| [
77,
796,
493,
7,
15414,
7203,
17469,
257,
1271,
25,
366,
4008,
198,
198,
22584,
5132,
796,
352,
198,
198,
1640,
1312,
287,
2837,
7,
16,
11,
299,
1343,
352,
2599,
198,
220,
220,
220,
1109,
5132,
1635,
28,
1312,
198,
198,
4798,
7,
... | 2.355932 | 59 |
import dropbox
import hashlib
import math
import os
import pdbox
import shutil
from pdbox.utils import DropboxError, dbx_uri, execute, normpath
def get_remote(path, meta=None):
    """
    Get a RemoteFile or RemoteFolder from path.

    Raises:
    - ValueError
    """
    if meta:
        # Metadata was supplied directly: skip the lookup entirely.
        if isinstance(meta, dropbox.files.FileMetadata):
            return RemoteFile(None, meta=meta)
        if isinstance(meta, dropbox.files.FolderMetadata):
            return RemoteFolder(None, meta=meta)

    path = normpath(path)
    if path == "/":
        # get_metadata on the root is not supported.
        return RemoteFolder(path)

    try:
        meta = execute(pdbox.dbx.files_get_metadata, path)
    except DropboxError:
        raise ValueError("%s could not be found" % dbx_uri(path))

    if isinstance(meta, dropbox.files.DeletedMetadata):
        pdbox.debug("%s was recently deleted" % dbx_uri(path))
        raise ValueError("%s could not be found" % dbx_uri(path))
    if isinstance(meta, dropbox.files.FolderMetadata):
        return RemoteFolder(None, meta=meta)
    # Anything else is treated as a file; only FileMetadata is expected to
    # reach this point.
    return RemoteFile(None, meta=meta)
def get_local(path):
    """
    Get a LocalFile or LocalFolder from path.

    Raises: ValueError
    """
    target = os.path.abspath(path)
    if os.path.isfile(target):
        return LocalFile(target)
    if os.path.isdir(target):
        return LocalFolder(target)
    raise ValueError("%s does not exist" % target)
def remote_assert_empty(path):
    """
    Assert that nothing exists at path in Dropbox.

    Raises: ValueError
    """
    path = normpath(path)
    try:
        existing = get_remote(path)
    except ValueError:
        # Lookup failed, so the path is free -- exactly what we want.
        return
    raise ValueError("Something exists at %s" % existing.uri)
def local_assert_empty(path):
    """
    Assert that nothing exists at path locally.

    Raises: ValueError
    """
    try:
        existing = get_local(path)
    except ValueError:
        # Nothing there -- the path is free.
        return
    raise ValueError("Something exists at %s" % existing.path)
class RemoteObject(object):
    """A file or folder inside Dropbox."""

    def delete(self):
        """
        Delete a file or folder inside Dropbox.

        Honors the global dryrun flag: no API call is made, but the action is
        still logged.

        Raises: DropboxError
        """
        if not pdbox._args.get("dryrun"):
            result = execute(pdbox.dbx.files_delete_v2, self.path)
            pdbox.debug("Metadata response: %s" % result.metadata)
        pdbox.info("Deleted %s" % self.uri)

    def copy(self, dest, overwrite=False):
        """
        Copy a file or folder to dest inside Dropbox.

        Returns the newly created RemoteFile/RemoteFolder (None on dryrun, or
        when source and destination contents are already identical).

        Raises:
        - ValueError
        - DropboxError
        """
        dest = normpath(dest)
        try:
            remote = get_remote(dest)
        except ValueError:  # Nothing exists at dest, nothing to worry about.
            remote = None
        else:  # Something exists here.
            if not overwrite:
                raise ValueError("Something exists at %s" % remote.uri)
            try:
                if self.hash == remote.hash:  # Nothing to update.
                    pdbox.info(
                        "%s and %s are identical" % (self.uri, remote.uri),
                    )
                    return
            except AttributeError:  # RemoteFolder doesn't have a hash.
                pass

        if not pdbox._args.get("dryrun"):
            if overwrite and remote:
                # There's no way to copy and overwrite at the same time,
                # so delete the existing file first.
                remote.delete()
            result = execute(pdbox.dbx.files_copy_v2, self.path, dest)
            # BUGFIX: typo "respones" -> "response" (matches delete/move).
            pdbox.debug("Metadata response: %s" % result.metadata)

        pdbox.info("Copied %s to %s" % (self.uri, dbx_uri(dest)))
        if not pdbox._args.get("dryrun"):  # Return the newly created object.
            return get_remote(None, meta=result.metadata)

    def move(self, dest, overwrite=False):
        """
        Move a file or folder to dest inside Dropbox.

        Note that this is essentially "rename", and will not move the source
        into a folder. Instead, it will delete that folder if overwrite is set.

        Returns the newly created RemoteFile/RemoteFolder (None on dryrun).

        Raises:
        - ValueError
        - DropboxError
        """
        dest = normpath(dest)
        try:
            remote = get_remote(dest)
        except ValueError:  # Nothing exists at dest, nothing to worry about.
            pass
        else:  # Something exists here.
            if not overwrite:
                raise ValueError("Something exists at %s" % remote.uri)
            # There's no way to move and overwrite at the same time,
            # so delete the existing file first.
            # Note that this can delete folders too.
            remote.delete()

        if not pdbox._args.get("dryrun"):
            result = execute(pdbox.dbx.files_move_v2, self.path, dest)
            pdbox.debug("Metadata response: %s" % result.metadata)

        pdbox.info("Moved %s to %s" % (self.path, dbx_uri(dest)))
        if not pdbox._args.get("dryrun"):  # Return the newly created object.
            return get_remote(None, meta=result.metadata)
class RemoteFile(RemoteObject):
    """A file in Dropbox."""

    def __init__(self, path, meta=None):
        """Look up (or adopt) Dropbox metadata for the file at path.

        If meta is given, the lookup is skipped and path is ignored.

        Raises: ValueError
        """
        if not meta:  # Look for a file at path.
            path = normpath(path)
            if path == "/":  # get_metadata on the root is not supported.
                raise ValueError("The root folder is not a file")
            try:
                meta = execute(pdbox.dbx.files_get_metadata, path)
            except DropboxError:
                raise ValueError("%s could not be found" % dbx_uri(path))
            if isinstance(meta, dropbox.files.FolderMetadata):
                raise ValueError("%s is a folder" % dbx_uri(meta.path_display))
            if isinstance(meta, dropbox.files.DeletedMetadata):
                pdbox.debug("%s was recently deleted" % dbx_uri(path))
                raise ValueError("%s could not be found" % dbx_uri(path))

        self.id = meta.id  # File ID, not sure how this can be used.
        self.size = meta.size  # Size in bytes.
        self.path = meta.path_display  # Path, including the name.
        self.parent = "/".join(self.path.split("/")[:-1])  # Parent folder.
        self.name = meta.name  # File name with extension.
        self.modified = meta.server_modified  # Last modified time.
        self.rev = meta.rev  # Revision, not sure how this can be used.
        self.hash = meta.content_hash  # Hash for comparing the contents.
        self.uri = dbx_uri(self.path)  # Convenience field for display.

    def download(self, dest, overwrite=False):
        """
        Download this file to dest locally.

        Downloads into a temp location first, then renames into place, so a
        failed transfer never clobbers an existing file.

        Raises:
        - ValueError
        - DropboxError
        - Exception
        """
        dest = os.path.abspath(dest)
        try:
            local = get_local(dest)
        except ValueError:  # Nothing exists at dest, nothing to worry about.
            local = None
        else:  # Something exists here.
            if local.hash() == self.hash:  # Nothing to update.
                pdbox.info("%s and %s are identical" % (self.uri, local.path))
                return
            if not overwrite:
                raise ValueError("%s already exists" % local.path)

        # To avoid any weird overwriting behaviour in the case of errors, we'll
        # download to a different location first, then move to dest afterwards.
        tmp_dest = os.path.join(
            pdbox.TMP_DOWNLOAD_DIR,
            os.path.basename(dest),
        )
        while os.path.exists(tmp_dest):  # Make sure the temp name is unique.
            tmp_dest += "_"

        if pdbox._args.get("dryrun"):
            pdbox.info("Downloaded %s to %s" % (self.uri, dest))
            return None

        # TODO: Progress bars.
        meta = execute(pdbox.dbx.files_download_to_file, tmp_dest, self.path)
        pdbox.debug("Metadata response: %s" % meta)

        if not os.path.isdir(os.path.dirname(dest)):
            # Create the parent directories of dest.
            os.makedirs(os.path.dirname(dest))

        # NOTE(review): dryrun already returned above, so this check is always
        # true here -- it appears to be defensive/redundant.
        if not pdbox._args.get("dryrun"):
            # os.rename overwrites files just fine, but not directories.
            if local and isinstance(local, LocalFolder):
                shutil.rmtree(local.path)
            # Move the file from the temp location to dest.
            os.rename(tmp_dest, dest)

        pdbox.info("Downloaded %s to %s" % (self.uri, dest))
        return LocalFile(dest)  # Return the newly created file.
class RemoteFolder(RemoteObject):
    """A folder in Dropbox."""
    def __init__(self, path, meta=None):
        """Initialize from a remote path, or directly from Dropbox metadata.
        If meta is given, path is ignored and the metadata is trusted as-is.
        Raises: ValueError
        """
        if not meta: # Look for a folder at path.
            path = normpath(path)
            if path == "/":
                # get_metadata on the root folder is not supported,
                # so build a synthetic root entry by hand.
                self.id = -1
                self.path = "/"
                self.parent = "/"
                self.name = "/"
                self.uri = "dbx://"
                return
            try:
                meta = execute(pdbox.dbx.files_get_metadata, path)
            except DropboxError:
                raise ValueError("%s could not be found" % dbx_uri(path))
        if isinstance(meta, dropbox.files.FileMetadata):
            raise ValueError("%s is a file" % dbx_uri(meta.path_display))
        if isinstance(meta, dropbox.files.DeletedMetadata):
            pdbox.debug("%s was recently deleted" % dbx_uri(path))
            raise ValueError("%s does not exist" % dbx_uri(path))
        self.id = meta.id # Folder ID, not sure how this can be used.
        self.path = meta.path_display # Path to the folder, including name.
        self.parent = "/".join(self.path.split("/")[:-1]) # Parent folder.
        self.name = meta.name # Base name of the folder.
        self.uri = dbx_uri(self.path) # Convenience field for display.
    @staticmethod
    def create(path, overwrite=False):
        """
        Create a new folder in Dropbox.
        Returns the new RemoteFolder (or the existing one), None on a dry run.
        Raises:
        - ValueError
        - DropboxError
        """
        path = normpath(path)
        try:
            remote = get_remote(path)
        except ValueError: # Nothing exists at path, nothing to worry about.
            pass
        else:
            if isinstance(remote, RemoteFolder):
                pdbox.info("%s already exists" % remote.uri)
                return remote
            elif not overwrite:
                raise ValueError("%s already exists" % remote.uri)
        if not pdbox._args.get("dryrun"):
            result = execute(pdbox.dbx.files_create_folder_v2, path)
            pdbox.debug("Metadata response: %s" % result.metadata)
        pdbox.info("Created new folder %s" % dbx_uri(path))
        if not pdbox._args.get("dryrun"): # Return the newly created folder.
            return RemoteFolder(None, meta=result.metadata)
    def contents(self):
        """Get this folder's contents in Dropbox."""
        # list_folder on "/" isn't supported for some reason.
        path = "" if self.path == "/" else self.path
        result = execute(pdbox.dbx.files_list_folder, path)
        entries = [get_remote(None, meta=e) for e in result.entries]
        while result.has_more:
            # As long as there are more pages to look through, add their
            # contents to the list of entries.
            # Fixed: rebind result so that has_more/cursor advance (the old
            # loop tested the stale first page forever), and iterate the
            # continuation response's .entries, not the response itself.
            result = execute(
                pdbox.dbx.files_list_folder_continue, result.cursor)
            entries.extend(get_remote(None, meta=e) for e in result.entries)
        return entries
    def download(self, dest, overwrite=False):
        """
        Download this folder to dest locally.
        Raises:
        - ValueError
        - DropboxError
        """
        dest = os.path.abspath(dest)
        try:
            local = get_local(dest)
        except ValueError: # Nothing exists at dest, nothing to worry about.
            local = None
        else:
            if not overwrite:
                raise ValueError("%s already exists" % local.path)
        # To avoid any weird overwriting behaviour in the case of errors, we'll
        # download to a different location first, then move to dest afterwards.
        tmp_dest = os.path.join(
            pdbox.TMP_DOWNLOAD_DIR,
            os.path.basename(dest),
        )
        while os.path.exists(tmp_dest):
            # Make sure the temp name is unique. Fixed: the old code appended
            # to dest instead of tmp_dest, so the loop condition never changed
            # and the call hung whenever the temp path already existed.
            tmp_dest += "_"
        LocalFolder.create(tmp_dest, overwrite=overwrite)
        for entry in self.contents():
            try:
                entry.download(os.path.join(tmp_dest, entry.name))
            except Exception:
                pdbox.error("%s could not be downloaded" % self.uri)
        if not pdbox._args.get("dryrun"):
            # os.rename overwrites files just fine, but not directories.
            if local and isinstance(local, LocalFolder):
                shutil.rmtree(local.path)
            # Move the folder from the temp location to dest.
            shutil.move(tmp_dest, dest)
        pdbox.info("Downloaded %s to %s" % (self.uri, dest))
    def sync(self, other):
        """
        Synchronize this folder to other.
        If other is a LocalFolder or string, it is synchronized locally.
        If other is a RemoteFolder, it is synchronized to that remote folder.
        """
        if isinstance(other, str) or isinstance(other, LocalFolder):
            return self.sync_local(other)
        else:
            return self.sync_remote(other)
    def sync_local(self, other):
        """
        Synchronize this folder to other locally.
        other is either a string or a LocalFolder.
        """
        pass # TODO
    def sync_remote(self, other):
        """
        Synchronize this folder to other inside Dropbox.
        other is a RemoteFolder.
        """
        pass # TODO
class LocalFile(object):
    """A file on disk."""
    def __init__(self, path):
        """Raises: ValueError
        NOTE(review): the original class had no constructor even though
        callers invoke LocalFile(path); this mirrors LocalFolder.__init__.
        """
        path = os.path.abspath(path)
        if not os.path.exists(path):
            raise ValueError("%s does not exist" % path)
        if os.path.isdir(path):
            raise ValueError("%s is a folder" % path)
        self.path = path # Path to the file, including the name.
        self.parent = os.path.dirname(self.path) # Parent folder.
        self.name = os.path.basename(self.path) # File name with extension.
        self.islink = os.path.islink(self.path) # If the path is a symlink.
    def hash(self):
        """
        Get this file's hash according to Dropbox's algorithm.
        https://www.dropbox.com/developers/reference/content-hash
        """
        block = 1024 * 1024 * 4 # The content is hashed in 4 MB blocks.
        hasher = hashlib.sha256()
        with open(self.path, "rb") as f:
            while True:
                chunk = f.read(block)
                if not chunk:
                    break
                # The content hash is the SHA-256 of the concatenation of
                # each block's SHA-256 digest.
                hasher.update(hashlib.sha256(chunk).digest())
        digest = hasher.hexdigest()
        pdbox.debug("Hash for %s: %s" % (self.path, digest))
        return digest
    def upload(self, dest, overwrite=False):
        """
        Upload this file to dest in Dropbox.
        Returns the new RemoteFile, or None on a dry run.
        Raises:
        - ValueError
        - DropboxError
        """
        dest = normpath(dest)
        try:
            remote = get_remote(dest)
        except ValueError: # Nothing exists at dest, nothing to worry about.
            pass
        else: # Something exists here.
            if isinstance(remote, RemoteFile) and self.hash() == remote.hash:
                # Nothing to update.
                pdbox.info("%s and %s are identical" % (self.path, remote.uri))
                return
            if not overwrite:
                raise ValueError("%s exists" % remote.uri)
        # Uploading can either happen all at once (with a 150 MB limit),
        # or in chunks. If the file is smaller than the selected chunk size,
        # then try to upload in one go.
        chunksize = min(pdbox._args.get("chunksize", 149.0), 149.0)
        pdbox.debug("Chunk size: %.2f MB" % chunksize)
        if pdbox._args.get("dryrun"):
            pdbox.info("Uploaded %s to %s" % (self.path, dbx_uri(dest)))
            return None
        # Set the write mode.
        if overwrite:
            mode = dropbox.files.WriteMode.overwrite
        else:
            mode = dropbox.files.WriteMode.add
        chunk = int(chunksize * 1024 * 1024) # Convert MB to bytes.
        with open(self.path, "rb") as f:
            data = f.read()
        sz = len(data)
        # TODO: Progress bars.
        if sz < chunk: # One-shot upload.
            meta = execute(pdbox.dbx.files_upload, data, dest, mode)
        else: # Multipart upload.
            nchunks = math.ceil(sz / chunk)
            # Initiate the session with the first byte of the file.
            # Fixed: the old code passed f[0], which raises TypeError --
            # file objects are not subscriptable (and f is closed here).
            start = execute(pdbox.dbx.files_upload_session_start, data[:1])
            cursor = dropbox.files.UploadSessionCursor(start.session_id, 1)
            # Now just add each chunk.
            while sz - cursor.offset > chunk:
                pdbox.debug(
                    # Fixed: chunk index is offset // chunk, not offset % chunk.
                    "Uploading chunk %d/%d" % (cursor.offset // chunk, nchunks),
                )
                execute(
                    pdbox.dbx.files_upload_session_append_v2,
                    data[cursor.offset:cursor.offset + chunk],
                    cursor,
                )
                cursor.offset += chunk
            # Upload the remaining to finish the transaction.
            meta = execute(
                pdbox.dbx.files_upload_session_finish,
                data[cursor.offset:],
                dropbox.files.CommitInfo(dest, mode),
            )
        pdbox.info("Uploaded %s to %s" % (self.path, dbx_uri(dest)))
        return RemoteFile(None, meta=meta)
    def delete(self):
        """Delete this file locally."""
        pdbox._args.get("dryrun") or os.remove(self.path)
        pdbox.info("Deleted %s" % self.path)
class LocalFolder(object):
    """A folder on disk."""
    def __init__(self, path):
        """Raises: ValueError"""
        path = os.path.abspath(path)
        if not os.path.exists(path):
            raise ValueError("%s does not exist" % path)
        if not os.path.isdir(path):
            raise ValueError("%s is a file" % path)
        self.path = path # Path to the folder, including name.
        # Fixed: self.parent was assigned twice in the original.
        self.parent = os.path.dirname(self.path) # Parent folder.
        self.name = os.path.basename(self.path) # Base name of the folder.
        self.islink = os.path.islink(self.path) # If the path is a symlink.
    @staticmethod
    def create(path, overwrite=False):
        """
        Create a new folder locally.
        Returns the new LocalFolder, or None on a dry run.
        Raises: ValueError
        """
        path = os.path.abspath(path)
        if os.path.isfile(path):
            if overwrite:
                pdbox._args.get("dryrun") or os.remove(path)
            else:
                raise ValueError("%s is a file" % path)
        if os.path.isdir(path):
            if overwrite:
                pdbox._args.get("dryrun") or shutil.rmtree(path)
            else:
                raise ValueError("%s already exists" % path)
        pdbox._args.get("dryrun") or os.makedirs(path)
        pdbox.info("Created new folder %s" % path)
        return None if pdbox._args.get("dryrun") else LocalFolder(path)
    def contents(self):
        """Get this folder's contents locally (immediate children only)."""
        entries = []
        walk = next(os.walk(self.path)) # (dirpath, dirnames, filenames)
        entries.extend(LocalFolder(os.path.join(walk[0], f)) for f in walk[1])
        entries.extend(LocalFile(os.path.join(walk[0], f)) for f in walk[2])
        return entries
    def upload(self, dest, overwrite=False):
        """
        Upload this folder to dest in Dropbox.
        Raises:
        - ValueError
        - DropboxError
        TODO: Parallel batch upload.
        https://www.dropbox.com/developers/reference/data-ingress-guide
        """
        dest = normpath(dest)
        remote_assert_empty(dest)
        remote = RemoteFolder.create(dest)
        for entry in self.contents():
            entry.upload("/".join([remote.path, entry.name]))
        return remote
    def delete(self):
        """Delete this folder locally."""
        pdbox._args.get("dryrun") or shutil.rmtree(self.path)
        pdbox.info("Deleted %s/" % self.path)
    def sync(self, other):
        """
        Synchronize this folder to other.
        other is either a RemoteFolder or a string (in which case it is
        converted to a RemoteFolder).
        """
        pass # TODO
| [
11748,
4268,
3524,
198,
11748,
12234,
8019,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
279,
67,
3524,
198,
11748,
4423,
346,
198,
198,
6738,
279,
67,
3524,
13,
26791,
1330,
38930,
12331,
11,
20613,
87,
62,
9900,
11,
12260,
11,
... | 2.197143 | 9,242 |
from mol_tree import Vocab, MolTree
from jtnn_vae import JTNNVAE
from jtnn_f import JTNNVAEMLP
from jtnn_mj import JTNNVAEMJ
from jtnn_enc import JTNNEncoder
from jtmpn import JTMPN
from mpn import MPN
from nnutils import create_var
from datautils import MolTreeFolder, PairTreeFolder, MolTreeDataset, MolTreeFolderMLP, MolTreeFolderMJ
| [
6738,
18605,
62,
21048,
1330,
47208,
397,
11,
17958,
27660,
198,
6738,
474,
83,
20471,
62,
33353,
1330,
449,
51,
6144,
11731,
36,
198,
6738,
474,
83,
20471,
62,
69,
1330,
449,
51,
6144,
11731,
3620,
19930,
198,
6738,
474,
83,
20471,
... | 2.77686 | 121 |
from django.contrib.localflavor.fi.forms import (FIZipCodeField,
FISocialSecurityNumber, FIMunicipalitySelect)
from utils import LocalFlavorTestCase
| [
6738,
42625,
14208,
13,
3642,
822,
13,
17946,
1604,
75,
5570,
13,
12463,
13,
23914,
1330,
357,
11674,
41729,
10669,
15878,
11,
198,
220,
220,
220,
376,
1797,
9402,
24074,
15057,
11,
376,
3955,
9462,
1483,
17563,
8,
198,
198,
6738,
338... | 3.058824 | 51 |
#!/usr/bin/python3

import argparse
import os

# Command line interface for the clustering tool: positional input files
# plus options controlling generation, heuristics, output and penalties.
parser = argparse.ArgumentParser()

parser.add_argument('infile', nargs='+',
                    help='one or more input files')
parser.add_argument('--generate', action='store_true',
                    help='generate new instance and exit')
parser.add_argument('--heuristic',
                    choices=('option_1', 'option_2', 'option_3'),
                    help='select the clustering heuristic to use')
parser.add_argument('--no-color', action='store_true',
                    help='optimize visualization for gray scale printouts')
parser.add_argument('--no-writeback', dest="no_writeback",
                    action='store_true',
                    help='disable updating problems with new best results')
parser.add_argument('--outdir', default=os.curdir+os.sep,
                    metavar='dir',
                    help=('write all output files in this directory; '
                          'the given directory must exist and be writable '
                          '(default: {})'.format(os.curdir+os.sep)))
parser.add_argument('--parkings', type=int, default=3,
                    metavar='n',
                    help=('every Nth input line is a parking '
                          '(only relevant for non-JSON input) '
                          '(default: 3)'))
parser.add_argument('-p', '--penalty', type=float,
                    default=0.0,
                    metavar='value',
                    help=('penalty for cluster attractiveness when adding '
                          'a customer requires more workers; '
                          'should be >= 1 '
                          '(default: 0.0)'))

args = parser.parse_args()

# Echo the parsed options for inspection.
# Fixed: the previous version printed args.outfile and args.flag, neither of
# which is a defined argument, so it raised AttributeError at runtime. The
# defined destinations are: infile, generate, heuristic, no_color,
# no_writeback, outdir, parkings and penalty.
print(args.infile)
print(args.outdir)
print(args.penalty)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
198,
48610,
13,
2860,
62,
49140,
10786,
259,
7753,
3256,
299,
22046,
11639,... | 2.039261 | 866 |
import sqreen
| [
11748,
19862,
1361,
628
] | 3.75 | 4 |
from extutils.flags import (
DuplicatedCodeError,
FlagCodeEnum, FlagSingleEnum, FlagDoubleEnum, FlagPrefixedDoubleEnum,
is_flag_instance, is_flag_class, is_flag_single, is_flag_double
)
from tests.base import TestCase
__all__ = ["TestFlagMisc", "TestFlagCodeEnum", "TestFlagSingleEnum", "TestFlagDoubleEnum",
"TestFlagPrefixedDoubleEnum"]
| [
6738,
1070,
26791,
13,
33152,
1330,
357,
198,
220,
220,
220,
49821,
3474,
10669,
12331,
11,
198,
220,
220,
220,
19762,
10669,
4834,
388,
11,
19762,
28008,
4834,
388,
11,
19762,
25628,
4834,
388,
11,
19762,
36698,
2966,
25628,
4834,
388,... | 2.715328 | 137 |
# -*- coding: utf8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
from lxml.html import fragment_fromstring, document_fromstring
from breadability.readable import Article
from breadability.annotated_text import AnnotatedTextHandler
from .utils import load_snippet, load_article
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
357,
198,
220,
220,
220,
4112,
62,
11748,
11,
198,
220,
220,
220,
7297,
11,
198,
220,
220,
220,
3601,
62,
8818,
11,
198,
220,
220,
22... | 3.116071 | 112 |
from lib.pyse import Pyse
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.connectiontype import ConnectionType
| [
6738,
9195,
13,
9078,
325,
1330,
9485,
325,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
1330,
2938,
62,
17561,
1756,
355,
13182,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
5313,
3... | 3.467532 | 77 |
#!/usr/bin/env python3
"""
@Filename: streamlit_server.py
@Author: dulanj
@Time: 12/02/2022 16:38
"""
import streamlit as st

from backend import SportEventDetectionBackend

# Initialize per-session state with defaults on first page load.
if 'process_button' not in st.session_state:
    st.session_state['process_button'] = False
if 'dataframe' not in st.session_state:
    st.session_state['dataframe'] = None
if 'time_frame' not in st.session_state:
    st.session_state['time_frame'] = (0, 60)
if 'start_time' not in st.session_state:
    st.session_state['start_time'] = 0

sports_event_detection_backend = SportEventDetectionBackend(return_json=False)

st.title('Sports Video Event Analysis System')
# image = Image.open('banner.jpg')
# st.image(image, caption='Sports Event Detection')
url = st.text_input('Paste rugby match youtube video URL')

# NOTE(review): get_info(), get_video_time() and process_video() are not
# defined in this chunk -- presumably defined elsewhere in the project.
# Fixed: the stray `@st.cache` decorators that preceded the `try:` below and
# the `if st.button(...)` further down were removed -- a decorator may only
# precede a function or class definition, so both were syntax errors.
try:
    info = get_info(url)
    enable = True
except Exception as e:
    # Fall back to placeholder metadata so the page still renders.
    info = {
        'title': '',
        'length': st.session_state['time_frame'][1],
        'views': 0,
    }
    enable = False
    url = "Not a valid url"

st.write('URL : {}'.format(url))
_length = int(info['length'])
st.write('{:10} : {}'.format("Title", info['title']))
st.write('{} : {} | {} : {} | {} : {} seconds'.format(
    "Length", get_video_time(info['length']),
    "Views", info['views'],
    "Duration", info['length'])
)
values = st.slider('Select video range seconds scale', 0, _length, (0, _length), disabled=not enable)
_start_sec, _end_sec = values
st.session_state['start_time'] = _start_sec
_skip_time = get_video_time(_start_sec)
_break_on_time = get_video_time(_end_sec)
st.write(f'Video selected from **{_skip_time}** to **{_break_on_time}**')
if enable:
    st.video(url, start_time=st.session_state['start_time'])

if st.button('Process Video', disabled=not enable):
    st.session_state['process_button'] = True
    dataframe = process_video(url, skip_time=_skip_time, break_on_time=_break_on_time)
    st.session_state['dataframe'] = dataframe
    st.session_state['time_frame'] = (_skip_time, _break_on_time)
    st.write('Completed!')
    st.balloons()

if st.session_state['process_button']:
    dataframe = st.session_state['dataframe']
    if dataframe is not None:
        if len(dataframe.index) > 0:
            _options = list(dataframe['event_name'].unique())
            options = st.multiselect('Show Events', _options, _options)
            st.write(f'Results are showing from **{st.session_state["time_frame"][0]}** to '
                     f'**{st.session_state["time_frame"][1]}**')
            st.write(dataframe[dataframe['event_name'].isin(options)])
        else:
            st.write('No events found')
    else:
        st.write('Video is not processed')

_ = [st.write('') for _ in range(10)]
st.write('https://github.com/CodeProcessor/sports-events-detection')
st.write('Copyright © 2022 Dulan Jayasuriya. All rights reserved.')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
31,
35063,
25,
220,
220,
220,
4269,
18250,
62,
15388,
13,
9078,
198,
31,
13838,
25,
220,
220,
220,
220,
220,
288,
377,
272,
73,
198,
31,
7575,
25,
220,
220,
220,
... | 2.527098 | 1,144 |
# -*- coding: utf-8 -*-
# quiz-orm/app.py

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os

app = Flask(__name__)

# Application configuration: the SQLite database lives next to the app root.
_db_path = os.path.join(app.root_path, 'quiz.db')
app.config.update(
    SECRET_KEY='bardzosekretnawartosc',
    DATABASE=_db_path,
    SQLALCHEMY_DATABASE_URI='sqlite:///' + _db_path,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    TYTUL='Quiz ORM SQLAlchemy'
)

# Database handle shared by the models.
baza = SQLAlchemy(app)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
38964,
12,
579,
14,
1324,
13,
9078,
198,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
11748,
28686,
198,
198... | 2.056818 | 264 |
from microbit import *
import random
import time

random.seed()

# Game images: paper (blad), rock (steen), scissors (schaar).
blad = Image("00000:"
             "09990:"
             "09990:"
             "09990:"
             "09990:")

steen = Image("00000:"
              "00000:"
              "09990:"
              "99999:"
              "99999:")

schaar = Image("99009:"
               "99090:"
               "00900:"
               "99090:"
               "99009:")

hands = [blad, steen, schaar]

# Player cycles through hands with button A and locks in with button B.
player = 0
display.show(hands[player])
while True:
    if button_a.was_pressed():
        player = (player + 1) % 3
        display.show(hands[player])
    elif button_b.was_pressed():
        break
    time.sleep(0.2)

# The micro:bit picks its own hand at random and reveals it.
computer = random.randint(0, 2)
display.show(hands[computer])
time.sleep(2)

# (player, computer) pairs in which the player wins:
# schaar (2) cuts blad (0), blad (0) wraps steen (1), steen (1) breaks schaar (2).
winning_pairs = ((2, 0), (0, 1), (1, 2))

score = 0
if player == computer:
    score = 1
    display.scroll('gelijkspel')
elif (player, computer) in winning_pairs:
    score = 2
    display.scroll("gewonnen")
else:
    display.scroll("verloren")
time.sleep(3)
| [
6738,
4580,
2545,
1330,
1635,
201,
198,
11748,
4738,
201,
198,
11748,
640,
201,
198,
201,
198,
25120,
13,
28826,
3419,
201,
198,
201,
198,
2,
1878,
20963,
335,
36795,
201,
198,
2436,
324,
796,
7412,
7203,
20483,
11097,
201,
198,
220,
... | 1.632075 | 848 |
#!/usr/bin/env python3

"""Setup script."""

from setuptools import setup, find_packages

# Runtime dependencies of the package.
RUNTIME_REQUIREMENTS = [
    "numpy",
    "dlib",
    "pillow",
    "h5py",
    "python-telegram-bot",
    "keras",
    "tensorflow",
    "opencv-python",
]

# Tools needed at build/setup time.
SETUP_REQUIREMENTS = [
    "pytest-runner",
    "pytest-pylint",
    "pytest-pycodestyle",
    "pytest-pep257",
    "pytest-cov",
]

# Tools needed to run the test suite.
TEST_REQUIREMENTS = [
    "pytest",
    "pylint",
    "pycodestyle",
    "pep257",
]

TROVE_CLASSIFIERS = [
    "Development Status :: 1 - Planning",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
]

setup(
    name="obormot",
    version="0.0.0",
    author="Britsyn Eugene, Luzyanin Artemiy, Rassolov Sergey",
    author_email="ebritsyn@gmail.com, kek@obormor.com, kek@obormot.ru",
    url="https://github.com/ebritsyn/obormot",
    license="MIT",
    packages=find_packages(exclude=['tests*']),
    install_requires=RUNTIME_REQUIREMENTS,
    setup_requires=SETUP_REQUIREMENTS,
    tests_require=TEST_REQUIREMENTS,
    classifiers=TROVE_CLASSIFIERS,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
40786,
4226,
526,
15931,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
672,
579,
313,
1600,
... | 2.056751 | 511 |
#
# Copyright (C) 2016, 2017
# The Board of Trustees of the Leland Stanford Junior University
# Written by Stephane Thiell <sthiell@stanford.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages

VERSION = '0.3.9'

# Read the long description up front with a context manager so the file
# handle is closed deterministically. Fixed: the original used
# open('README.rst').read(), which leaves closing the handle to the GC.
with open('README.rst') as readme:
    LONG_DESCRIPTION = readme.read()

setup(name='sasutils',
      version=VERSION,
      packages=find_packages(),
      author='Stephane Thiell',
      author_email='sthiell@stanford.edu',
      license='Apache Software License',
      url='https://github.com/stanford-rc/sasutils',
      platforms=['GNU/Linux'],
      keywords=['SAS', 'SCSI', 'storage'],
      description='Serial Attached SCSI (SAS) Linux utilities',
      long_description=LONG_DESCRIPTION,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Topic :: System :: Systems Administration'
      ],
      entry_points={
          'console_scripts': [
              'sas_counters=sasutils.cli.sas_counters:main',
              'sas_devices=sasutils.cli.sas_devices:main',
              'sas_discover=sasutils.cli.sas_discover:main',
              'sas_mpath_snic_alias=sasutils.cli.sas_mpath_snic_alias:main',
              'sas_sd_snic_alias=sasutils.cli.sas_sd_snic_alias:main',
              'ses_report=sasutils.cli.ses_report:main'
          ],
      },
      )
| [
2,
198,
2,
15069,
357,
34,
8,
1584,
11,
2177,
198,
2,
220,
220,
220,
220,
220,
383,
5926,
286,
9870,
2841,
286,
262,
406,
8822,
13863,
20000,
2059,
198,
2,
22503,
416,
39644,
1531,
536,
494,
297,
1279,
48476,
494,
297,
31,
14192,
... | 2.520595 | 874 |
'''
Script which runs a ModelBuilder model externally.
Imports the toolbox which contains the model of interest - in this case the "BombExplosion" model from the Practical1_Models.tbx.
This model simulates the impact of a bomb exploding on the buildings in its vicinity.
The model is run by specifying the locations of the input parameters in the order they are input to the model in ArcGIS.
@author Molly Asher
@Version 1.0
'''
import arcpy

# Set workspace
arcpy.env.workspace = "E:/MSc/Advanced-Programming"

# Specify input parameters for running the model.
explosion_location = "data/input/explosion.shp"
explosion_distance = "100 Meters"
building_shpfile = "data/input/buildings.shp"

# Specify where to save outputs from the model.
destroyed_buildings = "data/generated/destucto4.shp"

# If outputs exist already, then delete them (to avoid overwriting error)
if arcpy.Exists(destroyed_buildings):
    arcpy.Delete_management(destroyed_buildings)

# Run model (with try-catch exceptions)
try:
    # Try importing the toolbox, print error message if it fails.
    try:
        # Import custom toolbox - "Models", assign alias as Models
        arcpy.ImportToolbox("GitHub/GEOG_5790/Practical1-ModelBuilder/Explosion Toolbox.tbx", "Models")
        print ("Toolbox imported")
    except arcpy.ExecuteError as e:
        print("Import toolbox error", e)
    # NOTE(review): if the toolbox import fails, execution still falls
    # through to the model run below, which will then fail on a missing
    # BombExplosion_Models attribute -- consider aborting after the
    # failed import instead of only printing the error.
    # Try running the model, print error message if it fails.
    try:
        # Run the model 'Bomb Explosion' from the toolbox with alias 'Models'.
        arcpy.BombExplosion_Models(explosion_location, explosion_distance, building_shpfile, destroyed_buildings)
        print ("Explosion model executed")
    except arcpy.ExecuteError as e:
        print("Model run error", e)
except Exception as e:
    print(e)
| [
7061,
6,
198,
7391,
543,
4539,
257,
9104,
32875,
2746,
45107,
13,
198,
3546,
3742,
262,
2891,
3524,
543,
4909,
262,
2746,
286,
1393,
532,
287,
428,
1339,
262,
366,
48478,
18438,
18442,
1,
2746,
422,
262,
13672,
605,
16,
62,
5841,
14... | 2.69209 | 708 |
from .bar import bar
from .animation import animation
__version__ = '0.0.3'
| [
6738,
764,
5657,
1330,
2318,
198,
6738,
764,
11227,
341,
1330,
11034,
198,
834,
9641,
834,
796,
705,
15,
13,
15,
13,
18,
6,
198
] | 3.04 | 25 |
from tkinter import *
from tkinter import messagebox
import requests
import json

# Fixed: renamed from `type`, which shadowed the builtin type().
# NOTE(review): 'ann' is not an obviously valid NewsAPI category -- confirm
# the intended category value against the NewsAPI documentation.
category = 'ann'
cnt = 0
apiKey = 'YOUR_API_KEY_HERE'
BASE_URL = f'http://newsapi.org/v2/top-headlines?country=in&category={category}&apiKey=' + apiKey

root = Tk()
# NOTE(review): NewsApp is neither defined nor imported in this chunk; it is
# presumably defined elsewhere in the original file.
obj = NewsApp(root)
root.mainloop()
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
256,
74,
3849,
1330,
3275,
3524,
201,
198,
11748,
7007,
201,
198,
11748,
33918,
201,
198,
201,
198,
4906,
796,
705,
1236,
6,
201,
198,
66,
429,
28,
15,
201,
198,
15042,
9218,
796,
7... | 2.257576 | 132 |
"""empty message

Revision ID: 77f2a6d0342a
Revises:
Create Date: 2020-05-01 15:46:52.747402

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '77f2a6d0342a' # Unique id of this migration.
down_revision = None # First migration in the chain: nothing to revert to.
branch_labels = None # No named branches.
depends_on = None # No cross-branch dependencies.
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
8541,
69,
17,
64,
21,
67,
15,
31575,
64,
198,
18009,
2696,
25,
220,
198,
16447,
7536,
25,
12131,
12,
2713,
12,
486,
1315,
25,
3510,
25,
4309,
13,
48882,
32531,
198,
198,
37811,
... | 2.564815 | 108 |