Schema (field: dtype)
hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
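The listing above is the column schema for this dump: each field name is followed by its dtype, and every record below lists its values one per line in the same order. As a minimal, hedged sketch of how records with this schema could be inspected — the Parquet shard name below is a placeholder and the storage layout is an assumption, since nothing in this dump names its source files — the dtypes can be checked against a loaded shard like this:

import pandas as pd

# Hypothetical shard path; the dump does not name its source file.
df = pd.read_parquet("shard-00000.parquet")

# The dtypes should line up with the schema listing above.
print(df.dtypes.loc[["hexsha", "size", "avg_line_length", "alphanum_fraction"]])

# The three parallel metadata groups (max_stars_*, max_issues_*, max_forks_*)
# appear to describe the same file as seen in the repository with the most
# stars, issues, and forks respectively; the *_licenses fields are lists.
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_licenses"])

The records follow, one value per line in schema order.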
b5f0389774cedeaa041026bfccf255de23607efa
3,560
py
Python
app/profiles/schemas/update.py
MrPeker/acikkaynak-service
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
[ "Apache-2.0" ]
5
2021-02-28T22:29:13.000Z
2021-11-29T00:24:28.000Z
app/profiles/schemas/update.py
MrPeker/acikkaynak-service
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
[ "Apache-2.0" ]
null
null
null
app/profiles/schemas/update.py
MrPeker/acikkaynak-service
21c3f2faaa84342d2fa95709293bc84d1e2a23ae
[ "Apache-2.0" ]
3
2021-03-03T19:56:30.000Z
2021-03-06T22:10:35.000Z
import graphene from app.common.library import graphql from app.common.models import City from ..models import Profile from .queries import ProfileNode # queries class Query(graphene.ObjectType): pass # mutations class ProfileUpdateMutation(graphene.Mutation): Output = ProfileNode class Arguments: profile = graphene.ID(required=True) slug = graphene.String() first_name = graphene.String() last_name = graphene.String() gender = graphene.String() birthdate = graphene.String() email = graphene.String() phone = graphene.String() profile_picture_uri = graphene.String() locale = graphene.String() bio = graphene.String() location_city = graphene.ID() languages = graphene.List(graphene.ID) timezone = graphene.String() @classmethod # pylint:disable=unused-argument def mutate(cls, root, info, **kwargs): # TODO ensure that that profile belongs to this user profile_id = graphql.global_id_to_model_id(kwargs["profile"]) if profile_id is None: raise ValueError("Profile id is invalid") profile = Profile.objects.get(pk=profile_id) cognito_needs_update = False user = None if profile.users.count() == 1: user = profile.users.first() # if profile.users.filter(uuid=info.context.user.uuid).count() == 0: # raise ValueError("you don't own this profile") # for standard fields # (keyword, update_profile, update_user, update_cognito) fields = [ ("slug", True, False, False), ("first_name", True, True, True), ("last_name", True, True, True), ("gender", True, True, True), ("birthdate", True, True, True), ("email", True, True, True), ("phone", True, True, True), ("profile_picture_uri", True, True, True), ("bio", True, False, False), ("timezone", True, False, False), ("locale", False, True, True), ] for keyword, update_profile, update_user, update_cognito in fields: if kwargs.get(keyword): if update_profile: setattr(profile, keyword, kwargs[keyword]) if update_user and user is not None: setattr(user, keyword, kwargs[keyword]) if update_cognito: cognito_needs_update = True # for *-to-many fields if (kwargs.get("languages")): profile.languages.clear() for language_global_id in kwargs["languages"]: language_id = graphql.global_id_to_model_id(language_global_id) if language_id is not None: profile.languages.add(language_id) if (kwargs.get("location_city")): location_city_id = graphql.global_id_to_model_id(kwargs["location_city"]) if location_city_id is None: raise ValueError("City id is invalid") location_city = City.objects.get(pk=location_city_id) location_country = location_city.country profile.location_city = location_city profile.location_country = location_country if cognito_needs_update: pass # TODO: update cognito profile.full_clean() profile.save() return profile class Mutation(graphene.ObjectType): profile_update = ProfileUpdateMutation.Field()
31.504425
85
0.601404
387
3,560
5.369509
0.26615
0.057748
0.040423
0.024543
0.133782
0.084697
0.084697
0.030799
0
0
0
0.000804
0.301124
3,560
112
86
31.785714
0.834405
0.09382
0
0.026316
0
0
0.054121
0
0
0
0
0.008929
0
1
0.013158
false
0.026316
0.065789
0
0.171053
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
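Each record stores its flattened source in content alongside precomputed surface statistics and qsc_* quality signals. The signal definitions are not documented in this dump, so the sketch below is only a plausible reconstruction, under assumed definitions, of the three simplest statistics; it may not reproduce the stored values exactly:

# Hedged sketch: recompute three per-record surface statistics under assumed
# definitions (the dataset's own formulas are not given in this dump).
def surface_stats(content: str) -> dict:
    lines = content.split("\n")  # a trailing newline yields one extra empty line
    return {
        # assumed: mean number of characters per line
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        # assumed: fraction of all characters that are alphanumeric
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }

For the record above (size 3,560), avg_line_length 31.504425 matches 3560/113 while qsc_code_num_chars_line_mean_quality_signal 31.785714 matches 3560/112, so the two stored signals evidently differ in whether a trailing line is counted; the sketch above makes one such choice.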
b5f230d3037e9e1528cdc347b55ec3805c78a481
3,352
py
Python
scripts/plot_fits.py
trichter/robust_earthquake_spectra
ef816e30944293e27c0d5da4d31ec2184e6d187b
[ "MIT" ]
8
2021-07-23T13:01:29.000Z
2022-03-27T17:57:36.000Z
scripts/plot_fits.py
trichter/robust_earthquake_spectra
ef816e30944293e27c0d5da4d31ec2184e6d187b
[ "MIT" ]
null
null
null
scripts/plot_fits.py
trichter/robust_earthquake_spectra
ef816e30944293e27c0d5da4d31ec2184e6d187b
[ "MIT" ]
null
null
null
# Copyright 2021 Tom Eulenfeld, MIT license import matplotlib as mpl import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import numpy as np import pickle from qopen.core import get_pair, Gsmooth from qopen.rt import G as G_func def set_gridlabels(ax, i, n, N, xlabel='frequency (Hz)', ylabel=None): if i % n != 0 and ylabel: plt.setp(ax.get_yticklabels(), visible=False) elif i // n == (n - 1) // 2 and ylabel: ax.set_ylabel(ylabel) if i < N - n and xlabel: plt.setp(ax.get_xticklabels(), visible=False) elif i % n == (n - 1) // 2 and i >= N - n - 1 and xlabel: ax.set_xlabel(xlabel) def _get_times(tr): t0 = tr.stats.starttime - tr.stats.origintime return np.arange(len(tr)) * tr.stats.delta + t0 def plot_fits(energies, g0, b, W, R, v0, info, smooth=None, smooth_window='bartlett'): fs = 250 / 25.4 plt.figure(figsize=(fs, 0.6*fs)) tcoda, tbulk, Ecoda, Ebulk, Gcoda, Gbulk = info N = len(energies) nx, ny = 3, 3 gs = gridspec.GridSpec(ny, nx, wspace=0.06, hspace=0.08) share = None if b is None: b = 0 c1 = 'mediumblue' c2 = 'darkred' c1l = '#8181CD' c2l = '#8B6969' for i, energy in enumerate(energies): evid, station = get_pair(energy) ax = plt.subplot(gs[i // nx, i % nx], sharex=share, sharey=share) plot = ax.semilogy def get_Emod(G, t): return R[station] * W[evid] * G * np.exp(-b * t) st = energy.stats r = st.distance t = _get_times(energy) + r / v0 - (st.sonset - st.origintime) if smooth: plot(t, energy.data_unsmoothed, color='0.7') plot(t, energy.data, color=c1l) G_ = Gsmooth(G_func, r, t, v0, g0, smooth=smooth, smooth_window=smooth_window) Emod = get_Emod(G_, t) index = np.argwhere(Emod < 1e-30)[-1] Emod[index] = 1e-30 plot(t, Emod, color=c2l) plot(tcoda[i], Ecoda[i], color=c1) Emodcoda = get_Emod(Gcoda[i], tcoda[i]) plot(tcoda[i], Emodcoda, color=c2) if tbulk and len(tbulk) > 0: plot(tbulk[i], Ebulk[i], 'o', color=c1, mec=c1, ms=4) Emodbulk = get_Emod(Gbulk[i], tbulk[i]) plot(tbulk[i], Emodbulk, 'o', ms=3, color=c2, mec=c2) l = '%s\n%dkm' % (station, r / 1000) ax.annotate(l, (1, 1), (-5, -5), 'axes fraction', 'offset points', ha='right', va='top', size='x-small') ylabel = 'spectral energy density $E$ (Jm$^{-3}$Hz$^{-1}$)' set_gridlabels(ax, i, nx, N, xlabel='time (s)', ylabel=ylabel) kw = dict(color='darkgreen', alpha=0.5, lw=0, zorder=10000) ax.axvspan(tcoda[i][0]-4, tcoda[i][0]-0.3, 0.05, 0.08, **kw) ax.axvspan(tcoda[i][0]+0.3, tcoda[i][-1], 0.05, 0.08, **kw) if share is None: share = ax ax.yaxis.set_minor_locator(mpl.ticker.NullLocator()) ax.set_yticks(10. ** np.arange(-11, -5, 2)) ax.set_xlim((-2, 62)) ax.set_ylim((1e-13 / 1.5, 1e-6 * 1.5)) if __name__ == '__main__': fname = '../qopen/01_go/fits_20186784_04.00Hz-08.00Hz.pkl' with open(fname, 'rb') as f: tup = pickle.load(f) plot_fits(*tup) plt.savefig('../figs/qopen_fits_20186784_4-8Hz.pdf', bbox_inches='tight')
34.204082
77
0.568019
524
3,352
3.545802
0.370229
0.022605
0.008073
0.006459
0.057589
0.025834
0.025834
0.025834
0.025834
0
0
0.062016
0.268795
3,352
97
78
34.556701
0.696042
0.012232
0
0
0
0
0.0822
0.025688
0
0
0
0
0
1
0.05
false
0
0.0875
0.0125
0.1625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
b5f407423805cba0b85dc8b97c1c27b8ba3da9b6
225
py
Python
answers/Aryan Goyal/Day 10/Que 1.py
arc03/30-DaysOfCode-March-2021
6d6e11bf70280a578113f163352fa4fa8408baf6
[ "MIT" ]
22
2021-03-16T14:07:47.000Z
2021-08-13T08:52:50.000Z
answers/Aryan Goyal/Day 10/Que 1.py
arc03/30-DaysOfCode-March-2021
6d6e11bf70280a578113f163352fa4fa8408baf6
[ "MIT" ]
174
2021-03-16T21:16:40.000Z
2021-06-12T05:19:51.000Z
answers/Aryan Goyal/Day 10/Que 1.py
arc03/30-DaysOfCode-March-2021
6d6e11bf70280a578113f163352fa4fa8408baf6
[ "MIT" ]
135
2021-03-16T16:47:12.000Z
2021-06-27T14:22:38.000Z
def pangram(s): a = "abcdefghijklmnopqrstuvwxyz" for i in a: if i not in s.lower(): return False return True # main string1 = input() if(pangram(string1) == True): print("Yes") else: print("No")
17.307692
35
0.6
31
225
4.354839
0.677419
0
0
0
0
0
0
0
0
0
0
0.012048
0.262222
225
12
36
18.75
0.801205
0.017778
0
0
0
0
0.141553
0.118721
0
0
0
0
0
1
0.090909
false
0
0
0
0.272727
0.181818
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
b5f8afd3209dc9c313d59f605ef9e611cf525951
9,348
py
Python
tests/test_reliable_redis_backend.py
thread/django-lightweight-queue
2c67eb13a454fa1a02f8445c26915b6e9261fdad
[ "BSD-3-Clause" ]
23
2015-04-29T04:47:02.000Z
2022-03-11T12:43:01.000Z
tests/test_reliable_redis_backend.py
thread/django-lightweight-queue
2c67eb13a454fa1a02f8445c26915b6e9261fdad
[ "BSD-3-Clause" ]
23
2015-02-27T14:30:47.000Z
2021-12-02T14:18:34.000Z
tests/test_reliable_redis_backend.py
thread/django-lightweight-queue
2c67eb13a454fa1a02f8445c26915b6e9261fdad
[ "BSD-3-Clause" ]
1
2015-08-18T12:27:08.000Z
2015-08-18T12:27:08.000Z
import datetime import unittest import contextlib import unittest.mock from typing import Any, Dict, Tuple, Mapping, Iterator, Optional import fakeredis from django_lightweight_queue.job import Job from django_lightweight_queue.types import QueueName from django_lightweight_queue.backends.reliable_redis import ( ReliableRedisBackend, ) from . import settings from .mixins import RedisCleanupMixin class ReliableRedisDeduplicationTests(RedisCleanupMixin, unittest.TestCase): longMessage = True prefix = settings.LIGHTWEIGHT_QUEUE_REDIS_PREFIX def create_job( self, path: str = 'path', args: Tuple[Any, ...] = ('args',), kwargs: Optional[Dict[str, Any]] = None, timeout: Optional[int] = None, sigkill_on_stop: bool = False, created_time: Optional[datetime.datetime] = None, ) -> Job: if created_time is None: created_time = self.start_time job = Job(path, args, kwargs or {}, timeout, sigkill_on_stop) job.created_time = created_time return job def enqueue_job(self, queue: QueueName, *args: Any, **kwargs: Any) -> Job: job = self.create_job(*args, **kwargs) self.backend.enqueue(job, queue) return job @contextlib.contextmanager def mock_workers(self, workers: Mapping[str, int]) -> Iterator[None]: with unittest.mock.patch( 'django_lightweight_queue.utils._accepting_implied_queues', new=False, ), unittest.mock.patch.dict( 'django_lightweight_queue.app_settings.WORKERS', workers, ): yield def setUp(self) -> None: with unittest.mock.patch('redis.StrictRedis', fakeredis.FakeStrictRedis): self.backend = ReliableRedisBackend() self.client = self.backend.client super(ReliableRedisDeduplicationTests, self).setUp() self.start_time = datetime.datetime.utcnow() def test_empty_queue(self): result = self.backend.deduplicate('empty-queue') self.assertEqual( (0, 0), result, "Should do nothing when queue empty", ) def test_single_entry_in_queue(self): QUEUE = 'single-job-queue' self.enqueue_job(QUEUE) # sanity check self.assertEqual( 1, self.backend.length(QUEUE), ) result = self.backend.deduplicate(QUEUE) self.assertEqual( (1, 1), result, "Should do nothing when queue has only unique jobs", ) self.assertEqual( 1, self.backend.length(QUEUE), "Should still be a single entry in the queue", ) def test_unique_entries_in_queue(self): QUEUE = 'unique-jobs-queue' self.enqueue_job(QUEUE, args=('args1',)) self.enqueue_job(QUEUE, args=('args2',)) # sanity check self.assertEqual( 2, self.backend.length(QUEUE), ) result = self.backend.deduplicate(QUEUE) self.assertEqual( (2, 2), result, "Should do nothing when queue has only unique jobs", ) self.assertEqual( 2, self.backend.length(QUEUE), "Should still be a single entry in the queue", ) def test_duplicate_entries_in_queue(self): QUEUE = 'duplicate-jobs-queue' self.enqueue_job(QUEUE) self.enqueue_job(QUEUE) # sanity check self.assertEqual( 2, self.backend.length(QUEUE), ) result = self.backend.deduplicate(QUEUE) self.assertEqual( (2, 1), result, "Should remove duplicate entries from queue", ) self.assertEqual( 1, self.backend.length(QUEUE), "Should still be a single entry in the queue", ) def test_preserves_order_with_fixed_timestamps(self): QUEUE = 'job-queue' WORKER_NUMBER = 0 self.enqueue_job(QUEUE, args=['args1']) self.enqueue_job(QUEUE, args=['args2']) self.enqueue_job(QUEUE, args=['args1']) self.enqueue_job(QUEUE, args=['args3']) self.enqueue_job(QUEUE, args=['args2']) self.enqueue_job(QUEUE, args=['args1']) # sanity check self.assertEqual( 6, self.backend.length(QUEUE), ) result = self.backend.deduplicate(QUEUE) self.assertEqual( (6, 3), result, "Should remove duplicate entries from queue", ) self.assertEqual( 3, self.backend.length(QUEUE), "Wrong number of jobs remaining in queue", ) job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1) self.assertEqual( ['args1'], job.args, "First job dequeued should be the first job enqueued", ) self.backend.processed_job(QUEUE, WORKER_NUMBER, job) job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1) self.assertEqual( ['args2'], job.args, "Second job dequeued should be the second job enqueued", ) self.backend.processed_job(QUEUE, WORKER_NUMBER, job) job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1) self.assertEqual( ['args3'], job.args, "Third job dequeued should be the third job enqueued", ) def test_preserves_order_with_unique_timestamps(self): QUEUE = 'job-queue' WORKER_NUMBER = 0 time = self.start_time self.enqueue_job(QUEUE, args=['args1'], created_time=time) time += datetime.timedelta(seconds=1) self.enqueue_job(QUEUE, args=['args2'], created_time=time) time += datetime.timedelta(seconds=1) self.enqueue_job(QUEUE, args=['args1'], created_time=time) time += datetime.timedelta(seconds=1) self.enqueue_job(QUEUE, args=['args3'], created_time=time) time += datetime.timedelta(seconds=1) self.enqueue_job(QUEUE, args=['args2'], created_time=time) time += datetime.timedelta(seconds=1) self.enqueue_job(QUEUE, args=['args1'], created_time=time) # sanity check self.assertEqual( 6, self.backend.length(QUEUE), ) result = self.backend.deduplicate(QUEUE) self.assertEqual( (6, 3), result, "Should remove duplicate entries from queue", ) self.assertEqual( 3, self.backend.length(QUEUE), "Wrong number of jobs remaining in queue", ) job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1) self.assertEqual( ['args1'], job.args, "First job dequeued should be the first job enqueued", ) self.backend.processed_job(QUEUE, WORKER_NUMBER, job) job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1) self.assertEqual( ['args2'], job.args, "Second job dequeued should be the second job enqueued", ) self.backend.processed_job(QUEUE, WORKER_NUMBER, job) job = self.backend.dequeue(QUEUE, WORKER_NUMBER, timeout=1) self.assertEqual( ['args3'], job.args, "Third job dequeued should be the third job enqueued", ) def test_startup_recovers_orphaned_job(self): QUEUE = 'the-queue' self.enqueue_job(QUEUE) orig_job = self.backend.dequeue(QUEUE, worker_number=3, timeout=1) self.assertEqual( 0, self.backend.length(QUEUE), "Queue should appear empty after dequeuing job", ) with self.mock_workers({QUEUE: 1}): self.backend.startup(QUEUE) self.assertEqual( 1, self.backend.length(QUEUE), "Queue should have recovered entry after running startup", ) actual_job = self.backend.dequeue(QUEUE, worker_number=1, timeout=1) self.assertEqual( orig_job.as_dict(), actual_job.as_dict(), "The queue job should be the original one", ) def test_startup_doesnt_move_job_on_known_queue(self): QUEUE = 'the-queue' self.enqueue_job(QUEUE) orig_job = self.backend.dequeue(QUEUE, worker_number=3, timeout=1) self.assertEqual( 0, self.backend.length(QUEUE), "Queue should appear empty after dequeuing job", ) with self.mock_workers({QUEUE: 3}): self.backend.startup(QUEUE) self.assertEqual( 0, self.backend.length(QUEUE), "Queue should still appear empty after startup", ) actual_job = Job.from_json( self.client.lpop( self.backend._processing_key(QUEUE, 3), ).decode(), ) self.assertEqual( orig_job.as_dict(), actual_job.as_dict(), "The queue job should be the original one", )
28.5
81
0.578947
1,009
9,348
5.234886
0.14668
0.081219
0.056797
0.068345
0.68989
0.6649
0.641992
0.634419
0.611132
0.593715
0
0.010722
0.321566
9,348
327
82
28.587156
0.822138
0.006846
0
0.576471
0
0
0.147769
0.010886
0
0
0
0
0.109804
1
0.047059
false
0
0.043137
0
0.109804
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
b5f91ae2a0e4966e6263d4fa5ec3616c068ac79a
653
py
Python
src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py
geant-multicloud/MCMS-mastermind
81333180f5e56a0bc88d7dad448505448e01f24e
[ "MIT" ]
26
2017-10-18T13:49:58.000Z
2021-09-19T04:44:09.000Z
src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py
geant-multicloud/MCMS-mastermind
81333180f5e56a0bc88d7dad448505448e01f24e
[ "MIT" ]
14
2018-12-10T14:14:51.000Z
2021-06-07T10:33:39.000Z
src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py
geant-multicloud/MCMS-mastermind
81333180f5e56a0bc88d7dad448505448e01f24e
[ "MIT" ]
32
2017-09-24T03:10:45.000Z
2021-10-16T16:41:09.000Z
from django.db import migrations def fill_allocation_user_usage(apps, schema_editor): AllocationUserUsage = apps.get_model('waldur_slurm', 'AllocationUserUsage') for item in AllocationUserUsage.objects.all(): item.allocation = item.allocation_usage.allocation item.year = item.allocation_usage.year item.month = item.allocation_usage.month item.save(update_fields=['allocation', 'year', 'month']) class Migration(migrations.Migration): dependencies = [ ('waldur_slurm', '0018_add_allocation_month_year'), ] operations = [ migrations.RunPython(fill_allocation_user_usage), ]
28.391304
79
0.715161
70
653
6.414286
0.5
0.124722
0.126949
0.10245
0
0
0
0
0
0
0
0.007491
0.182236
653
22
80
29.681818
0.833333
0
0
0
0
0
0.140888
0.045942
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
b5ffeb36473c0df68ff9596c309080a9ed5b0766
4,584
py
Python
environments/env_locust.py
jwallnoefer/projectivesimulation
b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca
[ "Apache-2.0" ]
14
2018-02-13T17:39:58.000Z
2021-07-06T18:09:28.000Z
environments/env_locust.py
jwallnoefer/projectivesimulation
b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca
[ "Apache-2.0" ]
null
null
null
environments/env_locust.py
jwallnoefer/projectivesimulation
b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca
[ "Apache-2.0" ]
8
2018-03-22T04:12:31.000Z
2021-01-31T19:14:28.000Z
# -*- coding: utf-8 -*- """ Copyright 2018 Alexey Melnikov and Katja Ried. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. Please acknowledge the authors when re-using this code and maintain this notice intact. Code written by Katja Ried, implementing ideas from 'Modelling collective motion based on the principle of agency' Katja Ried, Thomas Muller & Hans J. Briegel arXiv:1712.01334 (2017) """ import numpy as np class TaskEnvironment(object): """This is a one-dimensional, circular world in which multiple agents move around. Percepts show agents the net movement of their close neighbours relative to themselves. Actions are turning or keeping going. Agents are rewarded for aligning themselves with their neighbours. This environment is used to study the collective motion of marching locusts. Reference: 'Modelling collective motion based on the principle of agency', Katja Ried, Thomas Muller and Hans J. Briegel, arXiv:1712.01334.""" def __init__(self, num_agents, world_size, sensory_range): """Initializes a world. Arguments: num_agents (int>0) - number of agents world_size (int>0) - length of world; ends are identified (ie world is circular) sensory range (int>0) - how many steps away an agent can see others. Simple example: env = TaskEnvironment(5,40,4) (for 5 agents) max_num_trials, max_steps_per_trial = 20, 30 """ self.num_agents = num_agents; self.world_size = world_size; self.sensory_range = sensory_range; self.num_actions = 2 #turn or keep going self.num_percepts_list = [5] self.num_max_steps_per_trial = 10**9 self.positions = np.random.randint(world_size,size=num_agents) #where each agent is #Note that multiple agents can occupy the same position - they do not collide. self.speeds = np.ndarray.tolist(np.random.choice([-1,1],num_agents)) #which way they are going #note that positions is an array whereas speeds is a list def get_neighbours(self,agent_index): """Determine indices of all agents within visual range including self.""" focal_pos = self.positions[agent_index]; neighbours = np.ndarray.tolist(np.where(dist_mod(self.positions,focal_pos,self.world_size)<self.sensory_range+1)[0]); return(neighbours) def net_rel_mvmt(self,agent_index): """Returns the net flow of all neighbours (excluding self), with sign indicating movement relative to orientation of focal agent.""" neighbours = self.get_neighbours(agent_index) neighbours.remove(agent_index) return(self.speeds[agent_index]*sum([self.speeds[index] for index in neighbours])) def get_percept(self,agent_index): """Given an agent index, returns an integer [0,4] encoding the net flow relative to self (truncated at abs<=2).""" #compute percept net_rel_move = self.net_rel_mvmt(agent_index) #map to limited range of percepts if net_rel_move<-2: net_rel_move=-2 if net_rel_move>+2: net_rel_move=2 return(net_rel_move+2) def move(self,agent_index, action): """Given an agent_index and that agent's action (0 for turn, 1 for keep going), this function updates their speed and position and computes their reward, along with the percept for the next agent in the list.""" self.speeds[agent_index] = self.speeds[agent_index]*(action*2-1) self.positions[agent_index] = np.remainder(self.positions[agent_index]+self.speeds[agent_index],self.world_size) reward = (np.sign(self.net_rel_mvmt(agent_index))+1)/2 next_percept = self.get_percept((agent_index+1)%self.num_agents) return ([next_percept], reward, False) def reset(self): """Sets positions and speeds back to random values and returns the percept for the 0th agent.""" self.positions = np.random.randint(self.world_size,size=self.num_agents) self.speeds = np.ndarray.tolist(np.random.choice([-1,1],self.num_agents)) return([self.get_percept(0)]) def dist_mod(num1,num2,mod): """Distance between num1 and num2 (absolute value) if they are given modulo an integer mod, ie between zero and mod. Also works if num1 is an array (not a list) and num2 a number or vice versa.""" diff=np.remainder(num1-num2,mod) diff=np.minimum(diff, mod-diff) return(diff)
49.290323
128
0.695681
676
4,584
4.594675
0.338757
0.057952
0.019317
0.017708
0.191565
0.140373
0.08886
0.08886
0.08886
0.073406
0
0.021407
0.215314
4,584
92
129
49.826087
0.842091
0.506545
0
0
0
0
0
0
0
0
0
0
0
1
0.175
false
0
0.025
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd0162bf0a28c31d37370edf04366759674e96cb
1,174
py
Python
masktools/superskims/slit.py
adwasser/masktools
c96c8f375f0e94ee2791466d0ce6d31007f58022
[ "MIT" ]
null
null
null
masktools/superskims/slit.py
adwasser/masktools
c96c8f375f0e94ee2791466d0ce6d31007f58022
[ "MIT" ]
null
null
null
masktools/superskims/slit.py
adwasser/masktools
c96c8f375f0e94ee2791466d0ce6d31007f58022
[ "MIT" ]
null
null
null
from __future__ import (absolute_import, division, print_function, unicode_literals) class Slit: def __init__(self, x, y, length, width, pa, name): ''' Representation of a slit in a mask. Coordinates are relative to the mask, so that the x-axis is along the long end and the y-axis is along the short end. Parameters ---------- x: float, arcsec along long end of mask y: float, arcsec along short end of mask length: float, arcsec, slit length (along spatial axis), should be a minimum of 3 width: float, arcsec, width of slit (along dispersion axis) pa: float, degrees, position angle of slit, relative to sky (i.e., 0 is north, 90 is east) name: string, unique (within mask) identifier ''' self.x = x self.y = y self.length = length self.width = width self.pa = pa self.name = name def __repr__(self): info_str = ': length of {0:.2f}, PA of {1:.2f} at ({2:.2f}, {3:.2f})' return '<Slit: ' + self.name + info_str.format(self.length, self.pa, self.x, self.y) + '>'
39.133333
98
0.581772
168
1,174
3.964286
0.422619
0.066066
0.033033
0.042042
0
0
0
0
0
0
0
0.014851
0.311755
1,174
29
99
40.482759
0.809406
0.457411
0
0
0
0.076923
0.120075
0
0
0
0
0
0
1
0.153846
false
0
0.076923
0
0.384615
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd0183d07de9ad7a1f13f37bb28f41e2ff5b5a7b
1,940
py
Python
gemmforge/instructions/builders/alloctor_builder.py
ravil-mobile/gemmforge
6381584c2d1ce77eaa938de02bc4f130f19cb2e4
[ "MIT" ]
null
null
null
gemmforge/instructions/builders/alloctor_builder.py
ravil-mobile/gemmforge
6381584c2d1ce77eaa938de02bc4f130f19cb2e4
[ "MIT" ]
2
2021-02-01T16:31:22.000Z
2021-05-05T13:44:43.000Z
gemmforge/instructions/builders/alloctor_builder.py
ravil-mobile/gemmforge
6381584c2d1ce77eaa938de02bc4f130f19cb2e4
[ "MIT" ]
null
null
null
from .abstract_builder import AbstractBuilder from gemmforge.symbol_table import SymbolType, Symbol from gemmforge.basic_types import RegMemObject, ShrMemObject from gemmforge.instructions import RegisterAlloc, ShrMemAlloc from gemmforge.basic_types import GeneralLexicon from abc import abstractmethod class AbstractAllocBuilder(AbstractBuilder): def __init__(self, vm, symbol_table): super(AbstractAllocBuilder, self).__init__(vm, symbol_table) self._obj = None @abstractmethod def _name_new_symbol(self): pass def get_resultant_obj(self): if not self._obj: raise NotImplementedError return self._obj class ShrMemAllocBuilder(AbstractAllocBuilder): def __init__(self, vm, symbol_table): super(ShrMemAllocBuilder, self).__init__(vm, symbol_table) self._counter = 0 def build(self, size=None): self._reset() name = self._name_new_symbol() self._obj = ShrMemObject(name, size) dest = Symbol(name=name, stype=SymbolType.SharedMem, obj=self._obj) self._symbol_table.add_symbol(dest) self._instructions.append(ShrMemAlloc(self._vm, dest, size)) def _name_new_symbol(self): name = f'{GeneralLexicon.LOCAL_SHR_MEM}{self._counter}' self._counter += 1 return name class RegistersAllocBuilder(AbstractAllocBuilder): def __init__(self, vm, symbol_table): super(RegistersAllocBuilder, self).__init__(vm, symbol_table) self._counter = 0 def build(self, size: int, init_value=None): self._reset() name = self._name_new_symbol() self._obj = RegMemObject(name, size) dest = Symbol(name, SymbolType.Register, self._obj) self._symbol_table.add_symbol(dest) self._instructions.append(RegisterAlloc(self._vm, dest, init_value)) def _name_new_symbol(self): name = f'{GeneralLexicon.REG_NAME}{self._counter}' self._counter += 1 return name
28.955224
72
0.723196
231
1,940
5.722944
0.255411
0.074887
0.059002
0.064297
0.538578
0.446293
0.427383
0.355522
0.22239
0.22239
0
0.00253
0.185052
1,940
66
73
29.393939
0.83365
0
0
0.352941
0
0
0.043814
0.043814
0
0
0
0
0
1
0.176471
false
0.019608
0.117647
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd0555b1790f397fc8d762146f856a6acab0847d
3,043
py
Python
Python3/809.expressive-words.py
610yilingliu/leetcode
30d071b3685c2131bd3462ba77c6c05114f3f227
[ "MIT" ]
null
null
null
Python3/809.expressive-words.py
610yilingliu/leetcode
30d071b3685c2131bd3462ba77c6c05114f3f227
[ "MIT" ]
null
null
null
Python3/809.expressive-words.py
610yilingliu/leetcode
30d071b3685c2131bd3462ba77c6c05114f3f227
[ "MIT" ]
null
null
null
# # @lc app=leetcode id=809 lang=python3 # # [809] Expressive Words # # https://leetcode.com/problems/expressive-words/description/ # # algorithms # Medium (46.84%) # Likes: 320 # Dislikes: 823 # Total Accepted: 45.2K # Total Submissions: 96.2K # Testcase Example: '"heeellooo"\n["hello", "hi", "helo"]' # # Sometimes people repeat letters to represent extra feeling, such as "hello" # -> "heeellooo", "hi" -> "hiiii".  In these strings like "heeellooo", we have # groups of adjacent letters that are all the same:  "h", "eee", "ll", "ooo". # # For some given string S, a query word is stretchy if it can be made to be # equal to S by any number of applications of the following extension # operation: choose a group consisting of characters c, and add some number of # characters c to the group so that the size of the group is 3 or more. # # For example, starting with "hello", we could do an extension on the group "o" # to get "hellooo", but we cannot get "helloo" since the group "oo" has size # less than 3.  Also, we could do another extension like "ll" -> "lllll" to get # "helllllooo".  If S = "helllllooo", then the query word "hello" would be # stretchy because of these two extension operations: query = "hello" -> # "hellooo" -> "helllllooo" = S. # # Given a list of query words, return the number of words that are # stretchy.  # # # # # Example: # Input: # S = "heeellooo" # words = ["hello", "hi", "helo"] # Output: 1 # Explanation: # We can extend "e" and "o" in the word "hello" to get "heeellooo". # We can't extend "helo" to get "heeellooo" because the group "ll" is not size # 3 or more. # # # # Constraints: # # # 0 <= len(S) <= 100. # 0 <= len(words) <= 100. # 0 <= len(words[i]) <= 100. # S and all words in words consist only of lowercase letters # # # # @lc code=start class Solution(object): def expressiveWords(self, S, words): """ :type S: str :type words: List[str] :rtype: int """ if not S: return 0 ans = 0 set_S = set(S) S_list = [] pre_s, pre_index = S[0], 0 for i, s in enumerate(S): if pre_s != s: S_list.append(S[pre_index:i]) pre_s, pre_index = s, i if i == len(S) - 1: S_list.append(S[pre_index:]) for word in words: if set(word) != set_S: continue word_list = [] pre_w, pre_index = word[0], 0 for i, w in enumerate(word): if pre_w != w: word_list.append(word[pre_index:i]) pre_w, pre_index = w, i if i == len(word) - 1: word_list.append(word[pre_index:]) if len(S_list) == len(word_list): if all(S_list[i] == word_list[i] if len(S_list[i]) < 3 else len(S_list[i]) >= len(word_list[i]) for i in range(len(S_list))): ans += 1 return ans # @lc code=end
29.833333
141
0.57049
449
3,043
3.799555
0.367483
0.023447
0.021102
0.014068
0.069168
0.053927
0
0
0
0
0
0.02307
0.302005
3,043
101
142
30.128713
0.780132
0.578048
0
0
0
0
0
0
0
0
0
0
0
1
0.034483
false
0
0
0
0.137931
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd068843b439a58814f27d16075e43744d08bd52
1,601
py
Python
settings/Microscope_settings.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
null
null
null
settings/Microscope_settings.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
1
2019-10-22T21:28:31.000Z
2019-10-22T21:39:12.000Z
settings/Microscope_settings.py
bopopescu/Lauecollect
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
[ "MIT" ]
2
2019-06-06T15:06:46.000Z
2020-07-20T02:03:22.000Z
Size = (1255, 1160) Position = (39, 26) ScaleFactor = 1.0 ZoomLevel = 32.0 Orientation = 0 Mirror = False NominalPixelSize = 0.125 filename = 'Z:\\All Projects\\Crystallization\\2018.08.27.caplilary with crystals inspection\\2018.08.27 CypA 2.jpg' ImageWindow.Center = (649, 559) ImageWindow.ViewportCenter = (2.41796875, 2.0) ImageWindow.crosshair_color = (255, 0, 255) ImageWindow.boxsize = (0.04, 0.04) ImageWindow.box_color = (255, 0, 0) ImageWindow.show_box = False ImageWindow.Scale = [[0.21944444444444444, -0.0763888888888889], [0.46944444444444444, -0.075]] ImageWindow.show_scale = True ImageWindow.scale_color = (255, 0, 0) ImageWindow.crosshair_size = (0.05, 0.05) ImageWindow.show_crosshair = False ImageWindow.show_profile = False ImageWindow.show_FWHM = False ImageWindow.show_center = False ImageWindow.calculate_section = False ImageWindow.profile_color = (255, 0, 255) ImageWindow.FWHM_color = (0, 0, 255) ImageWindow.center_color = (0, 0, 255) ImageWindow.ROI = [[-0.5194444444444445, -0.3458333333333333], [0.225, 0.19305555555555556]] ImageWindow.ROI_color = (255, 255, 0) ImageWindow.show_saturated_pixels = False ImageWindow.mask_bad_pixels = False ImageWindow.saturation_threshold = 233 ImageWindow.saturated_color = (255, 0, 0) ImageWindow.linearity_correction = False ImageWindow.bad_pixel_threshold = 233 ImageWindow.bad_pixel_color = (30, 30, 30) ImageWindow.show_grid = False ImageWindow.grid_type = 'xy' ImageWindow.grid_color = (0, 0, 255) ImageWindow.grid_x_spacing = 0.3 ImageWindow.grid_x_offset = 0.0 ImageWindow.grid_y_spacing = 0.5 ImageWindow.grid_y_offset = 0.0
37.232558
116
0.775141
224
1,601
5.375
0.357143
0.13289
0.037375
0.024917
0.142857
0
0
0
0
0
0
0.175365
0.102436
1,601
42
117
38.119048
0.662491
0
0
0
0
0.02381
0.065584
0.043098
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd07434502bfcaa7d1b29853452ba88cedddad3e
3,259
py
Python
model_rocke3d.py
projectcuisines/gcm_ana
cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741
[ "MIT" ]
1
2021-09-29T18:03:56.000Z
2021-09-29T18:03:56.000Z
model_rocke3d.py
projectcuisines/thai_trilogy_code
cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741
[ "MIT" ]
null
null
null
model_rocke3d.py
projectcuisines/thai_trilogy_code
cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Utilities for the ROCKE3D output.""" import dask.array as da import xarray as xr from grid import reverse_along_dim, roll_da_to_pm180 from model_um import calc_um_rel from names import rocke3d __all__ = ("adjust_rocke3d_grid", "calc_rocke3d_rei", "calc_rocke3d_rel") calc_rocke3d_rel = calc_um_rel def adjust_rocke3d_grid(darr, lon_name="lon", lat_name="lat"): """ Adjust the grid of a ROCKE3D data array. Reverse the latitude dimension and shift the substellar coordinate from -180 degrees to 0 degree in longitude. """ out = darr if lat_name in out.dims: out = reverse_along_dim(out, lat_name) if lon_name in out.dims: # Shift data along the longitude to center the substellar at (0,0) out = roll_da_to_pm180( out.assign_coords(**{lon_name: out[lon_name] + 180}), lon_name=lon_name ) return out def calc_rocke3d_rei(ds): """ Aggregate parametrization based on effective dimension. In the initial form, the same approach is used for stratiform and convective cloud. The fit provided here is based on Stephan Havemann's fit of Dge with temperature, consistent with David Mitchell's treatment of the variation of the size distribution with temperature. The parametrization of the optical properties is based on De (=(3/2)volume/projected area), whereas Stephan's fit gives Dge (=(2*SQRT(3)/3)*volume/projected area), which explains the conversion factor. The fit to Dge is in two sections, because Mitchell's relationship predicts a cusp at 216.208 K. Limits of 8 and 124 microns are imposed on Dge: these are based on this relationship and should be reviewed if it is changed. Note also that the relationship given here is for polycrystals only. Parameters ---------- ds: xarray.Dataset ROCKE-3D data set These are the parameters used in the temperature dependent parameterizations for ice cloud particle sizes below. Parameters for the aggregate parametrization a0_agg_cold = 7.5094588E-04, b0_agg_cold = 5.0830326E-07, a0_agg_warm = 1.3505403E-04, b0_agg_warm = 2.6517429E-05, t_switch = 216.208, t0_agg = 279.5, s0_agg = 0.05, Returns ------- rei: xarray.DataArray Ice effective radius [um]. """ a0_agg_cold = 7.5094588e-04 b0_agg_cold = 5.0830326e-07 a0_agg_warm = 1.3505403e-04 b0_agg_warm = 2.6517429e-05 t_switch = 216.208 t0_agg = 279.5 s0_agg = 0.05 # Air temperature in ROCKE-3D air_temp = ds[rocke3d.temp] # Calculate the R_eff rei = xr.where( air_temp < t_switch, a0_agg_cold * da.exp(s0_agg * (air_temp - t0_agg)) + b0_agg_cold, a0_agg_warm * da.exp(s0_agg * (air_temp - t0_agg)) + b0_agg_warm, ) # Limit of the parameterization rei = ( (3 / 2) * (3 / (2 * da.sqrt(3))) * xr.ufuncs.minimum(1.24e-04, xr.ufuncs.maximum(8.0e-06, rei)) ) rei = rei.rename("ice_cloud_condensate_effective_radius") rei.attrs.update( { "long_name": "ice_cloud_condensate_effective_radius", "units": "micron", } ) return rei
30.745283
83
0.666769
490
3,259
4.25102
0.383673
0.020163
0.013442
0.012482
0.163226
0.131541
0.131541
0.131541
0.131541
0.131541
0
0.074542
0.246701
3,259
105
84
31.038095
0.773931
0.505063
0
0
0
0
0.104788
0.051353
0
0
0
0
0
1
0.046512
false
0
0.116279
0
0.209302
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd080979389c4fa7ca1e77a7f150acdec97764c3
4,090
py
Python
models/wordcloud.py
mcxwx123/RecGFI
6e872c3b8c5398959b119e5ba14e665bbb45c56b
[ "MIT" ]
9
2022-01-28T14:24:35.000Z
2022-01-30T05:05:03.000Z
models/wordcloud.py
mcxwx123/RecGFI
6e872c3b8c5398959b119e5ba14e665bbb45c56b
[ "MIT" ]
null
null
null
models/wordcloud.py
mcxwx123/RecGFI
6e872c3b8c5398959b119e5ba14e665bbb45c56b
[ "MIT" ]
1
2022-01-28T14:24:41.000Z
2022-01-28T14:24:41.000Z
from wordcloud import WordCloud,STOPWORDS import matplotlib.pyplot as plt import numpy as np import pandas as pd import re import multidict as multidict from collections import Counter import json import datetime import os plt.switch_backend('agg') def removePunctuation(text): text = re.sub(r'[{}]+'.format('!,;:?`"\'、,;'),' ',text) return text.strip() def getFrequencyDictForText0(sentence,pro): global tmpDict0 # making dict for counting frequencies sentence=removePunctuation(sentence) for text in sentence.split(" "): if len(text)<3 or re.match("a|the|an|the|to|in|for|of|or|by|with|is|on|that|be", text) or (re.match("^[A-Za-z]+$", text) is None): continue val = tmpDict0.get(text, [0,[]]) pros=val[1] if pro not in pros: pros.append(pro) tmpDict0[text.lower()] = [val[0] + 1,pros] def getFrequencyDictForText1(sentence,pro): global tmpDict1 # making dict for counting frequencies sentence=removePunctuation(sentence) for text in sentence.split(" "): if len(text)<3 or re.match("a|the|an|the|to|in|for|of|or|by|with|is|on|that|be", text) or (re.match("^[A-Za-z]+$", text) is None): continue val = tmpDict1.get(text, [0,[]]) pros=val[1] if pro not in pros: pros.append(pro) tmpDict1[text.lower()] = [val[0] + 1,pros] class DateEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj,datetime.datetime): return obj.strftime("%Y-%m-%d %H:%M:%S") elif isinstance(obj,datetime.timedelta): return obj.seconds else: return json.JSONEncoder.default(self,obj) def drawwordcloud(): global tmpDict0,tmpDict1 finalbody0='' finalbody1='' current_work_dir = os.path.dirname(__file__) with open(current_work_dir+'/../data/issuedata.json') as f: issuestr = json.load(f) issuedic = json.loads(issuestr) issuedata = issuedic['issuedata'] lst=[] for i in range(len(issuedata)): lst.append(issuedata[i][0]) finaldata=pd.DataFrame(lst) finaldata=finaldata.values.tolist() finalbody0=[] finalbody1=[] for d in finaldata: pro=d[1] body=d[39] p=re.compile(r"```.+?```",flags=re.S) s=p.sub("",body) body=" ".join(s.split()) p=re.compile(r"http[:/\w\.]+") s=p.sub("",body) body=" ".join(s.split()) body.lower() if d[37]==0:#clscmt finalbody0.append([body,pro]) else: finalbody1.append([body,pro]) tmpDict0 = {} tmpDict1 = {} for i in finalbody0: getFrequencyDictForText0(i[0],i[1]) for i in finalbody1: getFrequencyDictForText1(i[0],i[1]) for key in list(tmpDict0.keys()): val0 = tmpDict0.get(key, [0,[]]) val1 = tmpDict1.get(key, [0,[]]) if len(list(set(val0[1]+val1[1])))<5: del tmpDict0[key] for key in list(tmpDict1.keys()): val0 = tmpDict0.get(key, [0,[]]) val1 = tmpDict1.get(key, [0,[]]) if len(list(set(val0[1]+val1[1])))<5: del tmpDict1[key] fullTermsDict0 = multidict.MultiDict() for key in tmpDict0: val0 = tmpDict0.get(key, [0,[]]) val1 = tmpDict1.get(key, [0,[]]) fullTermsDict0.add(key, pow(val0[0], 2)/(val0[0]+val1[0])) fullTermsDict1 = multidict.MultiDict() for key in tmpDict1: val0 = tmpDict0.get(key, [0,[]]) val1 = tmpDict1.get(key, [0,[]]) fullTermsDict1.add(key, pow(val1[0], 2)/(val0[0]+val1[0])) wc = WordCloud( background_color='white', width=500, height=350, max_font_size=100, min_font_size=3, max_words=50, relative_scaling=0.5, collocations=False, min_word_length=3, #stopwords=stopwords, mode='RGBA' #colormap='pink' ) wc.generate_from_frequencies(fullTermsDict0) wc.to_file(r"wordcloud0.png") wc.generate_from_frequencies(fullTermsDict1) wc.to_file(r"wordcloud1.png")
29.854015
138
0.596822
539
4,090
4.48423
0.306122
0.019859
0.023169
0.016549
0.343401
0.316094
0.29127
0.29127
0.272238
0.272238
0
0.038797
0.243765
4,090
136
139
30.073529
0.742645
0.027873
0
0.258621
0
0.034483
0.063508
0.030998
0
0
0
0
0
1
0.043103
false
0
0.086207
0
0.172414
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd08ddc4c6e6b83523aa9e949593219788ab5e5c
2,996
py
Python
favorites_updater.py
techonerd/moepoi
6440f39653bc3560e39429570bd25b7c564b7f54
[ "MIT" ]
36
2020-07-21T16:19:48.000Z
2022-03-21T15:31:02.000Z
favorites_updater.py
gaesant/moepoi
cd478ca00afa5140bb8057c7d37b1ccb2fcbe3b6
[ "MIT" ]
1
2022-02-18T07:41:14.000Z
2022-02-18T07:41:14.000Z
favorites_updater.py
gaesant/moepoi
cd478ca00afa5140bb8057c7d37b1ccb2fcbe3b6
[ "MIT" ]
176
2020-07-22T19:24:14.000Z
2022-03-30T23:42:58.000Z
from python_graphql_client import GraphqlClient import pathlib import re import os root = pathlib.Path(__file__).parent.resolve() client = GraphqlClient(endpoint="https://graphql.anilist.co") TOKEN = os.environ.get("ANILIST_TOKEN", "") def replace_chunk(content, marker, chunk, inline=False): r = re.compile( r"<!\-\- {} starts \-\->.*<!\-\- {} ends \-\->".format(marker, marker), re.DOTALL, ) if not inline: chunk = "\n{}\n".format(chunk) chunk = "<!-- {} starts -->{}<!-- {} ends -->".format(marker, chunk, marker) return r.sub(chunk, content) def make_query(): return """ query($favPage: Int) { Viewer { favourites { anime(page: $favPage) { nodes { title { romaji } siteUrl } pageInfo { total currentPage lastPage perPage hasNextPage } } manga(page: $favPage) { nodes { title { romaji } siteUrl } pageInfo { total currentPage lastPage perPage hasNextPage } } characters(page: $favPage) { nodes { name { full } siteUrl } pageInfo { total currentPage lastPage perPage hasNextPage } } } } } """ def fetch_favorites(oauth_token, types='anime'): results = [] variables = {"favPage": 1} data = client.execute( query=make_query(), variables=variables, headers={"Authorization": "Bearer {}".format(oauth_token)}, ) for x in data['data']['Viewer']['favourites'][types]['nodes']: results.append( { 'title': x['title']['romaji'] if types != 'characters' else x['name']['full'], 'url': x['siteUrl'] } ) return results if __name__ == "__main__": readme = root / "README.md" readme_contents = readme.open().read() # Favorites Anime data = fetch_favorites(TOKEN, types='anime') res = "\n".join( [ "* [{title}]({url})".format(**x) for x in data ] ) print (res) rewritten = replace_chunk(readme_contents, "favorites_anime", res) # Favorites Manga data = fetch_favorites(TOKEN, types='manga') res = "\n".join( [ "* [{title}]({url})".format(**x) for x in data ] ) print (res) rewritten = replace_chunk(readme_contents, "favorites_manga", res) # Favorites Characters data = fetch_favorites(TOKEN, types='characters') res = "\n".join( [ "* [{title}]({url})".format(**x) for x in data ] ) print (res) rewritten = replace_chunk(readme_contents, "favorites_characters", res) readme.open("w").write(rewritten)
23.046154
94
0.502003
274
2,996
5.364964
0.332117
0.032653
0.016327
0.027211
0.383673
0.326531
0.326531
0.287755
0.287755
0.287755
0
0.000519
0.357477
2,996
129
95
23.224806
0.763117
0.017356
0
0.34188
0
0
0.412585
0
0
0
0
0
0
1
0.025641
false
0
0.034188
0.008547
0.08547
0.025641
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd101452c6ae5bad47e4c2d957dbf69805a1b869
3,462
py
Python
SRC/common/IO/GUI/DIR.py
usnistgov/OOF3D
4fd423a48aea9c5dc207520f02de53ae184be74c
[ "X11" ]
31
2015-04-01T15:59:36.000Z
2022-03-18T20:21:47.000Z
SRC/common/IO/GUI/DIR.py
usnistgov/OOF3D
4fd423a48aea9c5dc207520f02de53ae184be74c
[ "X11" ]
3
2015-02-06T19:30:24.000Z
2017-05-25T14:14:31.000Z
SRC/common/IO/GUI/DIR.py
usnistgov/OOF3D
4fd423a48aea9c5dc207520f02de53ae184be74c
[ "X11" ]
7
2015-01-23T15:19:22.000Z
2021-06-09T09:03:59.000Z
# -*- python -*- # This software was produced by NIST, an agency of the U.S. government, # and by statute is not subject to copyright in the United States. # Recipients of this software assume all responsibilities associated # with its operation, modification and maintenance. However, to # facilitate maintenance we ask that before distributing modifed # versions of this software, you first contact the authors at # oof_manager@nist.gov. dirname = 'GUI' if not DIM_3: clib = 'oof2commonGUI' else: clib = 'oof3dcommonGUI' clib_order = 100 pyfiles = [ 'activeareaPage.py', 'activityViewer.py', 'chooser.py', 'colorparamwidgets.py', 'console.py', 'displaymethodwidget.py', 'fileselector.py', 'fixedwidthtext.py', 'fontselector.py', 'genericselectGUI.py', 'gfxLabelTree.py', 'gfxmenu.py', 'gfxwindow.py', 'gtklogger.py', 'gtkutils.py', 'guilogger.py', 'historian.py', 'initialize.py', 'introPage.py', 'labelledslider.py', 'mainmenuGUI.py', 'mainthreadGUI.py', 'matrixparamwidgets.py', 'microstructurePage.py', 'mousehandler.py', 'oofGUI.py', 'oof_mainiteration.py', 'parameterwidgets.py', 'pixelPage.py', 'pixelgroupwidget.py', 'pixelinfoGUI.py', 'pixelselectparamwidgets.py' 'pixelselecttoolboxGUI.py', 'progressbarGUI2.py', 'questioner.py', 'quit.py', 'regclassfactory.py', 'reporter_GUI.py', 'reporterrorGUI.py', 'subWindow.py', 'toolboxGUI.py', 'tutorialsGUI.py', 'viewertoolboxGUI.py', 'whowidget.py', 'widgetscope.py', 'workerGUI.py' ] if not DIM_3: cfiles = [ 'oofcanvas.C', 'rubberband.C', 'canvasdot.c', 'canvastriangle.c', 'gfxbrushstyle.C' ] swigfiles =[ 'oofcanvas.swg', 'rubberband.swg', 'gfxbrushstyle.swg' ] swigpyfiles = [ 'gfxbrushstyle.spy' ] hfiles = [ 'canvasdot.h', 'canvastriangle.h', 'oofcanvas.h', 'rubberband.h', 'rbstipple.xbm', 'rbstubble.xbm', 'gfxbrushstyle.h' ] else: cfiles = ['progressGUI.C'] if USE_COCOA: cfiles.append('oofcanvas3d.mm') else: cfiles.append('oofcanvas3d.C') swigfiles = ['oofcanvas3d.swg', 'progressGUI.swg'] hfiles = ['oofcanvas3d.h', 'progressGUI.h'] swigpyfiles = ['progressGUI.spy'] def set_clib_flags(clib): import oof2setuputils # This is a hack that is needed by pkg-config on Macs using # fink. After merging its pangocairo branch, fink isn't putting # pango.pc and freetype2.pc in the default locations because they # can cause conflicts. Once fink completes upgrading to modern # versions of these libraries, this hack can be removed. oof2setuputils.extend_path("PKG_CONFIG_PATH", "/sw/lib/pango-ft219/lib/pkgconfig", "/sw/lib/freetype219/lib/pkgconfig/") oof2setuputils.pkg_check("gtk+-2.0", GTK_VERSION, clib) oof2setuputils.pkg_check("pygtk-2.0", PYGTK_VERSION, clib) oof2setuputils.pkg_check("pygobject-2.0", PYGOBJECT_VERSION) if not DIM_3: oof2setuputils.pkg_check("libgnomecanvas-2.0", GNOMECANVAS_VERSION, clib) clib.externalLibs.append('oof2common') else: clib.externalLibs.append('oof3dcommon')
25.455882
75
0.624783
366
3,462
5.852459
0.519126
0.031746
0.041083
0.012605
0.030812
0
0
0
0
0
0
0.013867
0.250144
3,462
135
76
25.644444
0.811248
0.209705
0
0.068627
0
0
0.445956
0.066544
0
0
0
0
0
1
0.009804
false
0
0.009804
0
0.019608
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd12daa2d90f5e59ee73aa4f239e4f3eb0699f08
4,366
py
Python
chapter_01/main_chapter01_00.py
couldbebetter/simulations_radar_systems_design
fcb23964e10c7ebb9cb1beabadc257e970a2c1de
[ "MIT" ]
20
2018-02-02T06:46:14.000Z
2022-01-05T21:25:50.000Z
chapter_01/main_chapter01_00.py
couldbebetter/simulations_radar_systems_design
fcb23964e10c7ebb9cb1beabadc257e970a2c1de
[ "MIT" ]
null
null
null
chapter_01/main_chapter01_00.py
couldbebetter/simulations_radar_systems_design
fcb23964e10c7ebb9cb1beabadc257e970a2c1de
[ "MIT" ]
5
2018-05-31T16:42:07.000Z
2020-07-30T22:29:43.000Z
# -*- coding: utf-8 -*- """ Created on 21 October 2017 implements Listing 1.2. MATLAB Program “fig1_12.m” in Mahafza radar book @author: Ashiv Dhondea """ import numpy as np import RadarBasics as RB import RadarConstants as RC import RadarEquations as RE # Importing what's needed for nice plots. import matplotlib.pyplot as plt from matplotlib import rc rc('font', **{'family': 'serif', 'serif': ['Helvetica']}) rc('text', usetex=True) params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']} plt.rcParams.update(params) # ------------------------------------------------------------------------- # speed_light = RC.c; # [m/s] # ------------------------------------------------------------------------- # # Radar parameters P_Tx = 1.5e6; # [W] centre_freq = 5.6e9; #[Hz] G_Tx_dB = 45.; # [dB] G_Tx = RB.fn_dB_to_Power(G_Tx_dB) G_Rx = G_Tx; RCS = 0.1 #[m^2] bandwidth = 5e6; # [Hz] te = 290.; # [K] nf = 3; #[dB] T0 = RB.fn_dB_to_Power(nf)*te radar_loss = RB.fn_dB_to_Power(6.0); wavelength = RB.fnCalculate_Wavelength_or_Frequency(speed_light,centre_freq); rho_Tx = np.linspace(25e3,165e3,1000); # target range 25 -165 km, 1000 points P_Rx1 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); P_Rx2 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); P_Rx3 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); snr_Rx_1 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); snr_Rx_2 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); snr_Rx_3 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); snr_Rx_2_04 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); snr_Rx_3_18 = np.zeros([np.shape(rho_Tx)[0]],dtype=np.float64); for index in range(len(rho_Tx)): P_Rx1[index] = RE.fnCalculate_ReceivedPower(P_Tx,G_Tx,G_Rx,rho_Tx[index],rho_Tx[index],wavelength,RCS); P_Rx2[index] = RE.fnCalculate_ReceivedPower(P_Tx,G_Tx,G_Rx,rho_Tx[index],rho_Tx[index],wavelength,RCS/10.); P_Rx3[index] = RE.fnCalculate_ReceivedPower(P_Tx,G_Tx,G_Rx,rho_Tx[index],rho_Tx[index],wavelength,RCS*10.); snr_Rx_1[index] = RE.fnCalculate_ReceivedSNR(P_Rx1[index],T0,bandwidth,radar_loss); snr_Rx_2[index] = RE.fnCalculate_ReceivedSNR(P_Rx2[index],T0,bandwidth,radar_loss) snr_Rx_3[index] = RE.fnCalculate_ReceivedSNR(P_Rx3[index],T0,bandwidth,radar_loss) snr_Rx_2_04[index] = RE.fnCalculate_ReceivedSNR(P_Rx1[index]*0.4,T0,bandwidth,radar_loss) snr_Rx_3_18[index] = RE.fnCalculate_ReceivedSNR(P_Rx1[index]*1.8,T0,bandwidth,radar_loss) snr_Rx_1_dB = RB.fn_Power_to_dB(snr_Rx_1); snr_Rx_2_dB = RB.fn_Power_to_dB(snr_Rx_2); snr_Rx_3_dB = RB.fn_Power_to_dB(snr_Rx_3); rcs1 = RB.fn_Power_to_dB(RCS); rcs2 = RB.fn_Power_to_dB(RCS/10.) rcs3 = RB.fn_Power_to_dB(RCS*10.) snr_Rx_2_04_dB = RB.fn_Power_to_dB(snr_Rx_2_04); snr_Rx_3_18_dB = RB.fn_Power_to_dB(snr_Rx_3_18); # ------------------------------------------------------------------------- # fig = plt.figure(1); ax = fig.gca() plt.rc('text', usetex=True) plt.rc('font', family='serif') fig.suptitle(r"\textbf{SNR versus detection range for three different values of RCS}" ,fontsize=12); plt.plot(rho_Tx/1000.,snr_Rx_3_dB,label=r"$\sigma = %f~\mathrm{dBsm}$" %rcs3) plt.plot(rho_Tx/1000.,snr_Rx_1_dB,linestyle='-.',label=r"$\sigma = %f~\mathrm{dBsm}$" %rcs1) plt.plot(rho_Tx/1000.,snr_Rx_2_dB,linestyle='--',label=r"$\sigma = %f~\mathrm{dBsm}$" %rcs2) ax.set_ylabel(r"SNR $[\mathrm{dB}]$") ax.set_xlabel(r'Detection range $[\mathrm{km}]$'); plt.legend(loc='best') plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') fig.savefig('main_chapter01_00_12a.pdf',bbox_inches='tight',pad_inches=0.11,dpi=10) fig = plt.figure(2); ax = fig.gca() plt.rc('text', usetex=True) plt.rc('font', family='serif') fig.suptitle(r"\textbf{SNR versus detection range for three different values of radar peak power}" ,fontsize=12); plt.plot(rho_Tx/1000.,snr_Rx_3_18_dB,label=r"$P_\text{Tx} = 2.16~\mathrm{MW}$") plt.plot(rho_Tx/1000.,snr_Rx_1_dB,linestyle='-.',label=r"$P_\text{Tx} = 1.5~\mathrm{MW}$") plt.plot(rho_Tx/1000.,snr_Rx_2_04_dB,linestyle='--',label=r"$P_\text{Tx} = 0.6~\mathrm{MW}$" ) ax.set_ylabel(r"SNR $[\mathrm{dB}]$") ax.set_xlabel(r'Detection range $[\mathrm{km}]$'); plt.legend(loc='best') plt.grid(True,which='both',linestyle=(0,[0.7,0.7]),lw=0.4,color='black') fig.savefig('main_chapter01_00_12b.pdf',bbox_inches='tight',pad_inches=0.11,dpi=10)
40.803738
113
0.682776
785
4,366
3.549045
0.22293
0.046662
0.021536
0.040201
0.703877
0.655779
0.633166
0.549533
0.494257
0.450826
0
0.05715
0.082226
4,366
106
114
41.188679
0.638133
0.115208
0
0.186667
0
0
0.166276
0.013031
0
0
0
0
0
1
0
false
0
0.08
0
0.08
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd14232c1edf5c76909d75642903193968483bbc
1,087
py
Python
tests/jekpost_tests.py
arjunkrishnababu96/jekpost
2ddcb337e98c534426d83f1bd6fbde1f45f59225
[ "MIT" ]
1
2018-10-05T16:53:02.000Z
2018-10-05T16:53:02.000Z
tests/jekpost_tests.py
arjunkrishnababu96/jekpost
2ddcb337e98c534426d83f1bd6fbde1f45f59225
[ "MIT" ]
null
null
null
tests/jekpost_tests.py
arjunkrishnababu96/jekpost
2ddcb337e98c534426d83f1bd6fbde1f45f59225
[ "MIT" ]
null
null
null
import unittest
from datetime import date

import jekpost.jekpost_create as jek


class JekpostTests(unittest.TestCase):
    def test_date_gets_formatted(self):
        """ Check that dates are formatted as YYYY-MM-DD, e.g.
            31-DEC-2014 -> '2014-12-31'
            1-NOV-2015  -> '2015-11-01'
            11-JAN-2015 -> '2015-01-11'
        """
        sample_dates = [
            (date(2014, 12, 31), '2014-12-31'),
            (date(2015, 11, 1), '2015-11-01'),
            (date(2015, 1, 11), '2015-01-11')
        ]

        for date_object, expected_date in sample_dates:
            with self.subTest(i=date_object):
                formatted_date = jek.get_date_formatted(date_object)
                self.assertEqual(formatted_date, expected_date)

    def test_make_filename(self):
        date_formatted = '2014-12-31'
        title = 'Post 01'
        expected_filename = '2014-12-31-Post-01.md'

        result_filename = jek.make_filename(title, date_formatted)
        self.assertEqual(result_filename, expected_filename)


if __name__ == '__main__':
    unittest.main()
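The module under test is not included in this record. A hypothetical sketch of the two helpers the tests exercise, inferred purely from the assertions above; the real jekpost implementation may differ.

# Hypothetical stand-ins for jekpost.jekpost_create, inferred from the tests.
def get_date_formatted(date_object):
    # date(2014, 12, 31) -> '2014-12-31' (ISO date format)
    return date_object.strftime('%Y-%m-%d')


def make_filename(title, date_formatted):
    # ('Post 01', '2014-12-31') -> '2014-12-31-Post-01.md' (Jekyll post naming)
    return '{}-{}.md'.format(date_formatted, title.replace(' ', '-'))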
31.970588
68
0.580497
132
1,087
4.537879
0.371212
0.033389
0.053422
0
0
0
0
0
0
0
0
0.142287
0.308188
1,087
33
69
32.939394
0.654255
0.084637
0
0
0
0
0.080253
0.022175
0
0
0
0
0.095238
1
0.095238
false
0
0.142857
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd1639f542971f0b9d004e950fd65037d1434c94
4,788
py
Python
data/fidt_generate.py
PPGod95/FIDTM
b5582c5cc485496d85af2043ffd6e4266f354f3b
[ "MIT" ]
null
null
null
data/fidt_generate.py
PPGod95/FIDTM
b5582c5cc485496d85af2043ffd6e4266f354f3b
[ "MIT" ]
null
null
null
data/fidt_generate.py
PPGod95/FIDTM
b5582c5cc485496d85af2043ffd6e4266f354f3b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
@Project :
@FileName:
@Author  :penghr
@Time    :2021/11/xx xx:xx
@Desc    :

FIDTM-train/dataset/FIDTM/
├── test
│   ├── gt_fidt_map
│   │   └── IMG_8.h5
│   ├── gt_show
│   │   └── IMG_8.jpg
│   ├── images
│   │   └── IMG_8.jpg
│   └── labels
│       └── IMG_8.txt
└── train
    ├── gt_fidt_map
    │   └── IMG_1.h5
    ├── gt_show
    │   └── IMG_1.jpg
    ├── images
    │   └── IMG_1.jpg
    └── labels
        └── IMG_1.txt

The raw dataset is split into train & test, each with an images and a labels
folder. Running this script creates the gt_show and gt_fidt_map folders:
gt_show holds visualised annotations (not used for training), while
gt_fidt_map holds the generated FIDT maps and kpoint dicts used in the next
training step.
"""
import math
import os

import cv2
import h5py
import numpy as np
import torch
from tqdm import tqdm

# Output paths
dataset_path = '../dataset/FIDTM'
label_type = 'txt'
train_path = os.path.join(dataset_path, 'train')
test_path = os.path.join(dataset_path, 'test')
train_img_path = os.path.join(train_path, 'images')
test_img_path = os.path.join(test_path, 'images')
train_label_path = os.path.join(train_path, 'labels')
test_label_path = os.path.join(test_path, 'labels')
train_gt_map = train_img_path.replace('images', 'gt_fidt_map')
test_gt_map = test_img_path.replace('images', 'gt_fidt_map')
train_gt_show = train_img_path.replace('images', 'gt_show')
test_gt_show = test_img_path.replace('images', 'gt_show')

path_list = [train_gt_map, test_gt_map, train_gt_show, test_gt_show]
for i in path_list:
    os.makedirs(i, exist_ok=True)

train_list = []
for fs in os.listdir(train_img_path):
    train_list.append(os.path.join(train_img_path, fs))

test_list = []
for fs in os.listdir(test_img_path):
    test_list.append(os.path.join(test_img_path, fs))

img_paths = train_list + test_list
img_paths.sort()


def fidt_generate(im_data, gt_data, lamda):
    size = im_data.shape
    new_im_data = cv2.resize(im_data, (lamda * size[1], lamda * size[0]), 0)
    new_size = new_im_data.shape
    d_map = (np.zeros([new_size[0], new_size[1]]) + 255).astype(np.uint8)
    gt_data = lamda * gt_data
    for o in range(0, len(gt_data)):
        x = np.max([1, math.floor(gt_data[o][1])])
        y = np.max([1, math.floor(gt_data[o][0])])
        if x >= new_size[0] or y >= new_size[1]:
            continue
        d_map[x][y] = d_map[x][y] - 255
    distance_map = cv2.distanceTransform(d_map, cv2.DIST_L2, 0)
    distance_map = torch.from_numpy(distance_map)
    distance_map = 1 / (1 + torch.pow(distance_map, 0.02 * distance_map + 0.75))
    distance_map = distance_map.numpy()
    distance_map[distance_map < 1e-2] = 0
    return distance_map


print('Generating training data...')
with tqdm(total=len(img_paths)) as pbar:
    for img_path in img_paths:
        img = cv2.imread(img_path)
        if label_type == 'txt':
            gt = np.loadtxt(img_path.replace('images', 'labels').replace('.jpg', '.txt'))[:, 0:2].round(8)
        elif label_type == 'npy':
            gt = np.load(img_path.replace('images', 'labels').replace('.jpg', '.npy')).round(8)
        elif label_type == 'mat':
            gt = np.loadtxt(img_path.replace('images', 'labels').replace('.jpg', '.mat'))[:, 0:2].round(8)

        # Key step: generate the FIDT map from the labels.
        fidt_map = fidt_generate(img, gt, 1)
        # cv2.imshow('1', fidt_map)
        # cv2.waitKey(0)

        # Pixels at label positions are set to 1, everything else stays 0.
        kpoint = np.zeros((img.shape[0], img.shape[1]))
        for i in range(0, len(gt)):
            if int(gt[i][1]) < img.shape[0] and int(gt[i][0]) < img.shape[1]:
                kpoint[int(gt[i][1]), int(gt[i][0])] = 1
        # cv2.imshow('1', kpoint)
        # cv2.waitKey(0)

        # Save as an .h5 file (essentially a dict); could be optimised later.
        with h5py.File(img_path.replace('.jpg', '.h5').replace('images', 'gt_fidt_map'), 'w') as hf:
            hf['fidt_map'] = fidt_map
            hf['kpoint'] = kpoint
        pbar.update()

        # Visualisation only; can be skipped.
        try:
            fidt_map1 = fidt_map
            fidt_map1 = fidt_map1 / np.max(fidt_map1) * 255
            fidt_map1 = fidt_map1.astype(np.uint8)
            fidt_map1 = cv2.applyColorMap(fidt_map1, 2)
            cv2.imwrite(img_path.replace('images', 'gt_show'), fidt_map1)
        except Exception as e:
            print(img_path, e)
        # cv2.imshow('1', fidt_map1)
        # cv2.waitKey(0)

print('Done')
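A minimal sketch of how the files written above could be read back for the training step; the path is illustrative, but the keys match exactly what the script writes.

# Read one generated ground-truth file back (path is an example).
import h5py

with h5py.File('../dataset/FIDTM/train/gt_fidt_map/IMG_1.h5', 'r') as hf:
    fidt_map = hf['fidt_map'][()]  # continuous FIDT map (float array)
    kpoint = hf['kpoint'][()]      # binary map, 1 at each annotated head point

print(fidt_map.shape, int(kpoint.sum()))  # kpoint.sum() == number of annotated points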
33.71831
131
0.539474
672
4,788
3.714286
0.209821
0.050481
0.050481
0.064103
0.325321
0.213942
0.103365
0.055288
0.03766
0.03766
0
0.032592
0.314327
4,788
141
132
33.957447
0.706975
0.268797
0
0
0
0
0.071303
0
0
0
0
0
0
1
0.013158
false
0
0.092105
0
0.118421
0.039474
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd166a19a710f2d8a3cb312cb57d84d5ce6d3bb6
356
py
Python
tests/urls.py
maykinmedia/djadyen
8bde7172c72d68975d4a77c7ef6bed73412619dc
[ "BSD-3-Clause" ]
3
2018-10-19T06:57:50.000Z
2020-11-12T11:20:37.000Z
tests/urls.py
maykinmedia/djadyen
8bde7172c72d68975d4a77c7ef6bed73412619dc
[ "BSD-3-Clause" ]
16
2017-02-14T12:37:58.000Z
2019-04-25T07:55:42.000Z
tests/urls.py
maykinmedia/djadyen
8bde7172c72d68975d4a77c7ef6bed73412619dc
[ "BSD-3-Clause" ]
2
2018-05-16T10:08:34.000Z
2019-09-29T23:31:04.000Z
try:
    from django.urls import include, re_path
except ImportError:  # Django < 2.0
    from django.conf.urls import include, url as re_path

from django.contrib import admin

urlpatterns = [
    # These are regex patterns, so they need re_path()/url(); plain path()
    # would treat '^admin/' as a literal prefix and never match.
    re_path(r'^admin/', admin.site.urls),
    re_path(r'^app/', include('tests.app.urls')),
    re_path(r'^adyen/notifications/',
            include('djadyen.notifications.urls', namespace='adyen-notifications')),
]
27.384615
107
0.702247
47
356
5.319149
0.468085
0.12
0.072
0
0
0
0
0
0
0
0
0
0.143258
356
12
108
29.666667
0.819672
0
0
0
0
0
0.258427
0.132022
0
0
0
0
0
1
0
false
0
0.3
0
0.3
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd17b046c9a2e0dbd7f153a5a1f41fd0257f99eb
5,610
py
Python
src/Commands.py
rkpop/kokobot
d19d68e12a7e6c0a25373ae5404e46632d59c40f
[ "MIT" ]
3
2018-07-25T23:55:58.000Z
2018-10-17T05:50:18.000Z
src/Commands.py
rkpop/kokobot
d19d68e12a7e6c0a25373ae5404e46632d59c40f
[ "MIT" ]
null
null
null
src/Commands.py
rkpop/kokobot
d19d68e12a7e6c0a25373ae5404e46632d59c40f
[ "MIT" ]
1
2018-12-01T05:18:48.000Z
2018-12-01T05:18:48.000Z
import asyncio

from discord.ext import commands

from src.BaseCog import BaseCog
from src.DB import DB
from src.Reasons import Reasons


class Commands(BaseCog):
    def __init__(self, bot, config):
        super().__init__(bot, config)
        self.reasons = Reasons()

    HELP_MESSAGE = """
Command: `/kkb <action> [args]`
All messages sent by the bot will contain a "reddit_id" field. Use that ID for all of the below commands.
Comments will be marked with a White color. Posts will be marked with a Blue color.

Approve/Remove Comment:
`approvec [comment_id,]`
e.g. `/kkb approvec 7abc351`
e.g. `/kkb approvec 7asb472,7bashf2`
`removec [comment_id,]`

Approve Posts:
`approve [post_id,]`

Remove Posts:
`remove [post_id,]` OR `remove [post_id,] reasons [#]`
e.g. `/kkb remove 7bas4e reasons 2 5 19`
If the reason requires input from you, include the text after that number
e.g. `/kkb remove 7bas4e reasons 1 r/kpoppers`
You can also use the 'custom' reason for freeform response
e.g. `/kkb remove 7bas4e reasons custom "My custom reason"`
Make sure to use DOUBLE QUOTES instead of single quotes.

Get help:
`/kkb help`
"""

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        await ctx.channel.send(str(error), delete_after=15)
        await asyncio.sleep(15)
        await ctx.message.delete()

    @commands.command()
    async def help(self, ctx):
        await asyncio.gather(
            ctx.message.delete(),
            ctx.send(self.HELP_MESSAGE, delete_after=30),
        )

    @commands.command()
    async def approvec(self, ctx, comment_id_list):
        comment_ids = comment_id_list.split(",")
        if len(comment_ids) == 0:
            raise ValueError("No comment IDs were given")
        for comment_id in comment_ids:
            await self.reddit.approve_comment(comment_id)
        await asyncio.gather(
            ctx.message.delete(),
            self.delete_message(ctx.channel, comment_id),
        )

    @commands.command()
    async def removec(self, ctx, comment_id_list, *reasons):
        comment_ids = comment_id_list.split(",")
        if len(comment_ids) == 0:
            raise ValueError("No comment IDs were given")
        for comment_id in comment_ids:
            await self.reddit.remove_comment(comment_id)
        await asyncio.gather(
            ctx.message.delete(),
            self.delete_message(ctx.channel, comment_id),
        )

    @commands.command()
    async def approve(self, ctx, post_id_list):
        post_ids = post_id_list.split(",")
        if len(post_ids) == 0:
            raise ValueError("No posts were given")
        for post_id in post_ids:
            is_report = False
            if DB.get().is_post_resolved(post_id):
                is_report = True
            await self.reddit.approve_post(post_id, is_report=is_report)
        await asyncio.gather(
            ctx.message.delete(),
            self.delete_message(ctx.channel, post_id),
        )

    @commands.command()
    async def remove(self, ctx, post_id_list, *reasons):
        post_ids = post_id_list.split(",")
        if len(post_ids) == 0:
            raise ValueError("No posts were given")

        if len(reasons) < 2:
            reasons = []
        else:
            if reasons[0] != "reasons":
                raise ValueError('Invalid command format. Expected "reasons".')
            reasons = reasons[1:]

        if len(reasons) == 0:
            for post_id in post_ids:
                is_report = False
                if DB.get().is_post_resolved(post_id):
                    is_report = True
                await self.reddit.remove_post(post_id, is_report=is_report)
            await ctx.message.delete()
            return

        if len(post_ids) > 1:
            raise ValueError("Reasons are not supported when removing multiple posts")

        post_id = post_ids[0]
        reason_body = self.parse_reasons(reasons)
        submission = await self.reddit.praw().submission(id=post_id)
        header = self.reasons.get_header(submission.author, "post")
        footer = self.reasons.get_footer()
        reason_text = "{}{}{}".format(header, reason_body, footer)

        is_report = False
        if DB.get().is_post_resolved(post_id):
            is_report = True

        await asyncio.gather(
            self.reddit.remove_post(post_id, reason_text, is_report=is_report),
            ctx.message.delete(),
            self.delete_message(ctx.channel, post_id),
        )

    def parse_reasons(self, reason_input):
        # e.g. 1 'r/kpoppers' 2 3 6 9 'https://redd.it/7fb1r5' custom 'Custom reason!'
        reason_string = ""
        user_input = False
        for index, reason in enumerate(reason_input):
            if user_input:
                # Skip the text argument consumed by the previous reason.
                user_input = False
                continue
            if self.reasons.needs_text(reason):
                if len(reason_input) <= index + 1:
                    raise ValueError("Reason {} requires text.".format(reason))
                if reason_input[index + 1] == "custom":
                    raise ValueError("Reason {} requires text.".format(reason))
                reason_string += (
                    self.reasons.add_reason(reason, reason_input[index + 1]) + "\n\n"
                )
                user_input = True
            else:
                reason_string += self.reasons.add_reason(reason) + "\n\n"
        return reason_string
31.166667
86
0.587344
691
5,610
4.596237
0.222865
0.035894
0.035264
0.036209
0.448048
0.406801
0.36335
0.311083
0.293136
0.293136
0
0.012438
0.312121
5,610
179
87
31.340782
0.810573
0.012656
0
0.382353
0
0
0.231714
0
0
0
0
0
0
1
0.014706
false
0
0.036765
0
0.080882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd184a22649fd3e0a64f5b17ec6b9f8201e73eaa
2,981
py
Python
src/lur/grade.py
qlurkin/lur_python
39564f276b3c03a073d4922627634b67c3af2052
[ "MIT" ]
null
null
null
src/lur/grade.py
qlurkin/lur_python
39564f276b3c03a073d4922627634b67c3af2052
[ "MIT" ]
null
null
null
src/lur/grade.py
qlurkin/lur_python
39564f276b3c03a073d4922627634b67c3af2052
[ "MIT" ]
null
null
null
import json

import numpy as np
import pandas as pd


def load_from_csv(path):
    dt = pd.read_csv(path, sep=';', dtype={'matricule': object})
    return dt.set_index('matricule')


def fix_matricule(matricule):
    if matricule.startswith('195'):
        return '19' + matricule[3:]
    return matricule


def load_from_claco_csv(path):
    df = pd.read_csv(path, delimiter=';')
    df['matricule'] = df['username'].str.split('@', expand=True)[0]
    df['name'] = df['firstname'] + " " + df['lastname']
    df['grade'] = df['score'] / df['total_score_on']
    df = df[['matricule', 'name', 'grade']]
    df['matricule'] = df['matricule'].map(fix_matricule, na_action='ignore')
    df = df.dropna(subset=['matricule'])
    df = df.set_index('matricule')
    return df


def capwords(S):
    return ' '.join([w.capitalize() for w in S.split(' ')])


def save(df, path):
    df.to_json(path, indent=4, force_ascii=False)


def combine(**kwargs):
    res = pd.DataFrame()
    for df in kwargs.values():
        res = res.combine_first(df[['name']])
    for name, df in kwargs.items():
        res[name] = df['grade']
        res[name] = res[name].fillna(0.0)
    return res


def to_plus_ecam_csv(df: pd.DataFrame, activity_code, path=None):
    if path is None:
        path = activity_code + '.csv'
    if 'status' in df:
        df = pd.DataFrame(df[['grade', 'status']])
    else:
        df = pd.DataFrame(df[['grade']])
        df['status'] = np.nan
    df['stat'] = df['status'].map(to_plus_ecam_stat)
    df['cote'] = df['grade']
    df['ae'] = activity_code
    df = pd.DataFrame(df[['ae', 'cote', 'stat']])
    df.to_csv(path, sep=';', encoding='utf8', index_label='matricule')


def to_plus_ecam_stat(status):
    if status == 'présent':
        return None
    if status == 'absent':
        return 'a'
    if status == 'malade':
        return 'm'
    return status


def from_auto_correction(path):
    with open(path, encoding='utf8') as file:
        students = json.load(file)['students']
    if 'check' in students[0]:
        grades = {student['student']['matricule']: student['check']['grade'] for student in students}
    else:
        grades = {student['student']['matricule']: student['grade'] for student in students}
    names = {student['student']['matricule']: student['student']['name'] for student in students}
    grades = pd.Series(grades)
    names = pd.Series(names)
    df = pd.DataFrame({'name': names, 'grade': grades})
    return df


def round_to_half(grade):
    return np.floor(2 * grade + 0.5) / 2


def round_to_tenth(grade):
    return np.floor(10 * grade + 0.5) / 10


if __name__ == '__main__':
    data = {
        'matricule': ['12345', '23456', '34567'],
        'name': ['Quentin', 'André', 'Ken'],
        'grade': [12, 13, 14],
        'status': ['absent', 'malade', 'présent']
    }
    df = pd.DataFrame(data)
    df = df.set_index('matricule')
    to_plus_ecam_csv(df, 'ic1t', 'uc1t.csv')
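An illustrative use of combine() above: it merges several graded activities (one DataFrame per keyword) into a single frame with one grade column per activity. The names, matricules, and grades here are made up for the example.

# Example input: two activities indexed by matricule, each with 'name' and 'grade'.
import pandas as pd

lab = pd.DataFrame({'name': ['Quentin', 'André'], 'grade': [12.0, 13.5]},
                   index=pd.Index(['12345', '23456'], name='matricule'))
exam = pd.DataFrame({'name': ['Quentin', 'Ken'], 'grade': [10.0, 14.0]},
                    index=pd.Index(['12345', '34567'], name='matricule'))

marks = combine(lab=lab, exam=exam)
# -> one row per matricule, columns 'name', 'lab', 'exam'; missing grades become 0.0
print(marks)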
31.378947
101
0.606172
406
2,981
4.330049
0.307882
0.015927
0.044369
0.025597
0.133106
0
0
0
0
0
0
0.02
0.211674
2,981
95
102
31.378947
0.728085
0
0
0.074074
0
0
0.154259
0
0
0
0
0
0
1
0.135802
false
0
0.061728
0.037037
0.358025
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd1bc728b1d732bdeadd112c3709dd6ba324fe1b
5,705
py
Python
simulate_position_covariance_data.py
ronniyjoseph/Hybrid-Calibration
7f24a8a5f67d647a47d4559566f7461cb3be57ac
[ "AFL-3.0" ]
null
null
null
simulate_position_covariance_data.py
ronniyjoseph/Hybrid-Calibration
7f24a8a5f67d647a47d4559566f7461cb3be57ac
[ "AFL-3.0" ]
9
2019-10-23T03:30:33.000Z
2020-02-19T05:25:27.000Z
simulate_position_covariance_data.py
ronniyjoseph/Hybrid-Calibration
7f24a8a5f67d647a47d4559566f7461cb3be57ac
[ "AFL-3.0" ]
null
null
null
import os
import numpy
import copy
import argparse
from matplotlib import pyplot

from src.radiotelescope import RadioTelescope
from src.radiotelescope import BaselineTable
from src.skymodel import SkyRealisation
from simulate_beam_covariance_data import compute_baseline_covariance
from simulate_beam_covariance_data import create_hex_telescope
from simulate_beam_covariance_data import plot_covariance_data
import time


def position_covariance_simulation(array_size=3, create_signal=True, compute_covariance=True,
                                   plot_covariance=True, show_plot=True):
    output_path = "/data/rjoseph/Hybrid_Calibration/numerical_simulations/"
    project_path = "linear_position_covariance_numerical_point_fixed/"
    n_realisations = 100000
    position_precision = 1e-3

    if not os.path.exists(output_path + project_path + "/"):
        print("Creating Project folder at output destination!")
        os.makedirs(output_path + project_path)

    telescope = RadioTelescope(load=False, shape=['linear', 14, 5])  # create_hex_telescope(array_size)

    if create_signal:
        create_visibility_data(telescope, position_precision, n_realisations,
                               output_path + project_path, output_data=True)

    if compute_covariance:
        compute_baseline_covariance(telescope, output_path + project_path, n_realisations, data_type='model')
        compute_baseline_covariance(telescope, output_path + project_path, n_realisations, data_type='perturbed')
        compute_baseline_covariance(telescope, output_path + project_path, n_realisations, data_type='residual')

    if plot_covariance:
        figure, axes = pyplot.subplots(1, 3, figsize=(18, 5))
        plot_covariance_data(output_path + project_path, simulation_type="Position",
                             figure=figure, axes=axes)
    if show_plot:
        pyplot.show()
    return


def create_visibility_data(telescope_object, position_precision, n_realisations, path, output_data=False):
    print("Creating Signal Realisations")
    if not os.path.exists(path + "/" + "Simulated_Visibilities") and output_data:
        print("Creating realisation folder in Project path")
        os.makedirs(path + "/" + "Simulated_Visibilities")

    ideal_baselines = telescope_object.baseline_table
    for i in range(n_realisations):
        if i % int(n_realisations / 100) == 0:
            print(f"Realisation {i}")

        # source_population = SkyRealisation(sky_type='random', flux_high=1, seed=i)
        # l_coordinate = numpy.random.uniform(-1, 1, 1)
        # m_coordinate = numpy.random.uniform(-1, 1, 1)
        #
        # source_population = SkyRealisation(sky_type="point", fluxes=numpy.array([100]), l_coordinates=l_coordinate,
        #                                    m_coordinates=m_coordinate, spectral_indices=numpy.array([0.8]))

        source_population = SkyRealisation(sky_type="point", fluxes=numpy.array([100]), l_coordinates=0.3,
                                           m_coordinates=0.0, spectral_indices=numpy.array([0.8]))
        perturbed_telescope = copy.copy(telescope_object)

        # Compute position perturbations
        number_antennas = len(perturbed_telescope.antenna_positions.x_coordinates)
        x_offsets = numpy.random.normal(0, position_precision, number_antennas)
        y_offsets = numpy.random.normal(0, position_precision, number_antennas)
        # print(ideal_baselines.u_coordinates)

        perturbed_telescope.antenna_positions.x_coordinates += x_offsets
        perturbed_telescope.antenna_positions.y_coordinates += y_offsets

        # Compute uv coordinates
        perturbed_telescope.baseline_table = BaselineTable(position_table=perturbed_telescope.antenna_positions)
        perturbed_baselines = perturbed_telescope.baseline_table

        # Compute visibilities for the ideal case and the perturbed case
        model_visibilities = source_population.create_visibility_model(ideal_baselines,
                                                                       frequency_channels=numpy.array([150e6]))
        perturbed_visibilities = source_population.create_visibility_model(perturbed_baselines,
                                                                           frequency_channels=numpy.array([150e6]))
        residual_visibilities = model_visibilities - perturbed_visibilities

        numpy.save(path + "/" + "Simulated_Visibilities/" + f"model_realisation_{i}",
                   model_visibilities.flatten())
        numpy.save(path + "/" + "Simulated_Visibilities/" + f"perturbed_realisation_{i}",
                   perturbed_visibilities.flatten())
        numpy.save(path + "/" + "Simulated_Visibilities/" + f"residual_realisation_{i}",
                   residual_visibilities.flatten())
    return


def perturbed_to_original_mapper(original_baselines, perturbed_baselines):
    perturbed_to_original_mapping = numpy.zeros(perturbed_baselines.number_of_baselines)
    for i in range(perturbed_baselines.number_of_baselines):
        antenna1_indices = numpy.where(original_baselines.antenna_id1 == perturbed_baselines.antenna_id1[i])
        antenna2_indices = numpy.where(original_baselines.antenna_id2 == perturbed_baselines.antenna_id2[i])
        perturbed_to_original_mapping[i] = numpy.intersect1d(antenna1_indices, antenna2_indices)[0]
    return perturbed_to_original_mapping.astype(int)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ssh", action="store_true", dest="ssh_key", default=False)
    params = parser.parse_args()

    import matplotlib
    if params.ssh_key:
        matplotlib.use("Agg")
    from matplotlib import pyplot

    position_covariance_simulation()
46.382114
117
0.713234
636
5,705
6.069182
0.245283
0.025648
0.030829
0.038083
0.351554
0.315026
0.196114
0.180052
0.125907
0.096891
0
0.013623
0.202279
5,705
122
118
46.762295
0.834542
0.098335
0
0.074074
0
0
0.097214
0.055913
0
0
0
0
0
1
0.037037
false
0
0.17284
0
0.246914
0.049383
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd1d6496d7db8cd8d21e423c19bb1534688474e4
24,456
py
Python
anthill/event/admin.py
anthill-services/anthill-event
3c303f33e4c150ce2dfed4f3534ec40e935ecfb8
[ "MIT" ]
null
null
null
anthill/event/admin.py
anthill-services/anthill-event
3c303f33e4c150ce2dfed4f3534ec40e935ecfb8
[ "MIT" ]
null
null
null
anthill/event/admin.py
anthill-services/anthill-event
3c303f33e4c150ce2dfed4f3534ec40e935ecfb8
[ "MIT" ]
1
2017-12-03T22:03:10.000Z
2017-12-03T22:03:10.000Z
from anthill.common.validate import validate
from anthill.common import admin as a, update

from . model.event import EventNotFound, CategoryNotFound, EventFlags, EventEndAction

import ujson
import collections.abc


EVENT_END_ACTION_DESCRIPTION = """
<b>Send Message</b><br>A message with detailed information about event (including score, rank, profile)
will be sent to the participating players<br><br>
<b>Call Exec Function</b><br>A function on exec service will be called with detailed information about event
(including score, rank, profile). In that case the Server Code should be enabled, with function with name
<code>event_completed</code>:
<pre><code>async function event_completed(args) {
&nbsp;&nbsp;&nbsp;&nbsp;// args[\"event\"] would contain event info
&nbsp;&nbsp;&nbsp;&nbsp;// args[\"participants\"] would contain a list of participation objects to process
&nbsp;&nbsp;&nbsp;&nbsp;// (one object for each player/participant), like so:
&nbsp;&nbsp;&nbsp;&nbsp;{
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\"account\": &lt;account id&gt;, // or \"group\" for group-based event
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\"profile\": &lt;participation profile&gt;,
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\"score\": &lt;score&gt;,
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\"rank\": &lt;rank&gt;
&nbsp;&nbsp;&nbsp;&nbsp;}
}

event_completed.allow_call = true;
</code></pre><br>
"""


class CategoriesController(a.AdminController):
    async def get(self):
        categories = await self.application.events.list_categories(self.gamespace)

        result = {
            "categories": categories
        }

        return result

    def render(self, data):
        return [
            a.breadcrumbs([
                a.link("events", "Events")
            ], "Categories"),
            a.links("Categories", [
                a.link("category", category.name, "list-alt", category_id=category.category_id)
                for category in data["categories"]
            ]),
            a.links("Navigate", [
                a.link("events", "Go back", icon="chevron-left"),
                a.link("common", "Edit common template", icon="flask"),
                a.link("new_category", "Create a new category", icon="plus"),
                a.link("https://spacetelescope.github.io/understanding-json-schema/index.html",
                       "See docs", icon="book")
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]


class CategoryController(a.AdminController):
    async def delete(self, danger, **ignored):
        if danger != "confirm":
            raise a.Redirect("category", category_id=self.context.get("category_id"))

        category_id = self.context.get("category_id")
        await self.application.events.delete_category(self.gamespace, category_id)
        raise a.Redirect("categories", message="Category has been deleted")

    async def get(self, category_id):
        category = await self.application.events.get_category(self.gamespace, category_id)
        scheme_json = category.scheme

        result = {
            "scheme": scheme_json,
            "category_name": category.name
        }

        return result

    def render(self, data):
        return [
            a.breadcrumbs([
                a.link("events", "Events"),
                a.link("categories", "Categories")
            ], data["category_name"]),
            a.form("Category template", fields={
                "scheme": a.field("scheme", "json", "primary"),
                "category_name": a.field("Category name (ID)", "text", "primary", "non-empty")
            }, methods={
                "update": a.method("Update", "primary"),
            }, data=data),
            a.split([
                a.notice(
                    "About templates",
                    "Each category template has a common template shared across categories. "
                    "Category template inherits a common template."
                ),
                a.form("Danger", fields={
                    "danger": a.field("This cannot be undone! The events of this category will also be deleted! "
                                      "Type 'confirm' to do this.", "text", "danger", "non-empty")
                }, methods={
                    "delete": a.method("Delete category", "danger"),
                }, data=data),
            ]),
            a.links("Navigate", [
                a.link("events", "Go back", icon="chevron-left"),
                a.link("common", "Edit common template", icon="flask"),
                a.link("events", "See events of this category", category=self.context.get("category_id")),
                a.link("https://spacetelescope.github.io/understanding-json-schema/index.html",
                       "See docs", icon="book")
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]

    async def update(self, scheme, category_name):
        category_id = self.context.get("category_id")

        try:
            scheme_data = ujson.loads(scheme)
        except (KeyError, ValueError):
            raise a.ActionError("Corrupted json")

        await self.application.events.update_category(self.gamespace, category_id, scheme_data, category_name)

        raise a.Redirect(
            "category",
            message="Category has been updated",
            category_id=category_id)


class ChooseCategoryController(a.AdminController):
    async def apply(self, category):
        raise a.Redirect("new_event", category=category)

    async def get(self, category=None):
        categories = await self.application.events.list_categories(self.gamespace)

        return {
            "category": category,
            "categories": {
                cat.category_id: cat.name
                for cat in categories
            }
        }

    def render(self, data):
        return [
            a.breadcrumbs([
                a.link("events", "Events")
            ], "Choose category"),
            a.form(
                title="Choose event category to create event of",
                fields={
                    "category": a.field(
                        "Select category", "select", "primary", values=data["categories"]
                    )
                },
                methods={
                    "apply": a.method("Proceed", "primary")
                },
                data=data
            ),
            a.links("Navigation", links=[
                a.link("events", "Go back", icon="chevron-left"),
                a.link("categories", "Manage categories", "list-alt")
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]


class CommonController(a.AdminController):
    async def get(self):
        scheme = await self.application.events.get_common_scheme(self.gamespace)

        result = {
            "scheme": scheme
        }

        return result

    def render(self, data):
        return [
            a.breadcrumbs([
                a.link("events", "Events"),
                a.link("categories", "Categories")
            ], "Common template"),
            a.form("Common template", fields={
                "scheme": a.field("scheme", "json", "primary")
            }, methods={
                "update": a.method("Update", "primary"),
            }, data=data),
            a.links("Navigate", [
                a.link("@back", "Go back", icon="chevron-left"),
                a.link("https://spacetelescope.github.io/understanding-json-schema/index.html",
                       "See docs", icon="book")
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]

    async def update(self, scheme):
        try:
            scheme_data = ujson.loads(scheme)
        except (KeyError, ValueError):
            raise a.ActionError("Corrupted json")

        await self.application.events.update_common_scheme(self.gamespace, scheme_data)

        raise a.Redirect("common", message="Common template has been updated")


class EventController(a.AdminController):
    async def delete(self, **ignored):
        event_id = self.context.get("event_id")

        try:
            event = await self.application.events.get_event(self.gamespace, event_id)
        except EventNotFound:
            raise a.ActionError("No such event")

        await self.application.events.delete_event(self.gamespace, event_id)

        raise a.Redirect(
            "events",
            message="Event has been deleted",
            category=event.category_id)

    async def get(self, event_id):
        events = self.application.events

        try:
            event = await events.get_event(self.gamespace, event_id)
        except EventNotFound:
            raise a.ActionError("Event was not found.")

        category_id = event.category_id
        category_name = event.category

        enabled = "true" if event.enabled else "false"
        tournament = "true" if event.tournament else "false"
        clustered = "true" if event.clustered else "false"
        group = "true" if event.group else "false"

        start_dt = str(event.time_start)
        end_dt = str(event.time_end)
        end_action = str(event.end_action)

        common_scheme = await events.get_common_scheme(self.gamespace)
        category = await events.get_category(self.gamespace, category_id)
        category_scheme = category.scheme

        scheme = common_scheme.copy()
        update(scheme, category_scheme)

        return {
            "enabled": enabled,
            "tournament": tournament,
            "clustered": clustered,
            "group": group,
            "event": event,
            "start_dt": start_dt,
            "end_dt": end_dt,
            "event_data": event.data,
            "scheme": scheme,
            "category": category_id,
            "category_name": category_name,
            "end_action": end_action
        }

    def render(self, data):
        category = data["category"]

        return [
            a.breadcrumbs([
                a.link("events", "Events", category=category),
            ], "Event"),
            a.form(
                title="Event editor",
                fields={
                    "event_data": a.field(
                        "Event properties", "dorn", "primary",
                        schema=data["scheme"], order=8
                    ),
                    "enabled": a.field("Is event enabled", "switch", "primary", order=3),
                    "tournament": a.field("Is tournament enabled (e.g. players will be ranked)",
                                          "switch", "primary", order=4),
                    "clustered": a.field("Is tournament's leaderboard clustered", "switch", "primary",
                                         readonly=True, order=5),
                    "group": a.field("Is event group-based", "switch", "primary",
                                     readonly=True, order=6),
                    "end_action": a.field("Action Once Event Is Complete", "select", "primary", order=7, values={
                        EventEndAction.NONE: "Do nothing",
                        EventEndAction.MESSAGE: "Send Message",
                        EventEndAction.EXEC: "Call Exec Function"
                    }, description=EVENT_END_ACTION_DESCRIPTION),
                    "category_name": a.field("Category", "readonly", "primary"),
                    "start_dt": a.field("Start date", "date", "primary", order=1),
                    "end_dt": a.field("End date", "date", "primary", order=2)
                },
                methods={
                    "save": a.method("Save", "primary"),
                    "delete": a.method("Delete event", "danger")
                },
                data=data
            ),
            a.links("Navigate", [
                a.link("events", "Go back", icon="chevron-left", category=category),
                a.link("category", "Edit category", icon="list-alt", category_id=category),
                a.link("new_event", "Clone event", icon="clone",
                       clone=self.context.get("event_id"), category=data.get("category"))
            ])
        ]

    @validate(event_data="load_json_dict", start_dt="datetime", end_dt="datetime",
              enabled="bool", tournament="bool", end_action="str")
    async def save(self, event_data, start_dt, end_dt, enabled=False, tournament=False,
                   end_action=EventEndAction.NONE, **ignore):
        event_id = self.context.get("event_id")
        events = self.application.events

        try:
            event = await events.get_event(self.gamespace, event_id)
        except EventNotFound:
            raise a.ActionError("Event was not found.")

        flags = event.flags
        flags.set(EventFlags.TOURNAMENT, tournament)

        end_action = EventEndAction(end_action)

        await events.update_event(
            self.gamespace, event_id, enabled, flags, event_data, start_dt, end_dt, end_action)

        raise a.Redirect(
            "event",
            message="Event has been updated",
            event_id=event_id)

    def access_scopes(self):
        return ["event_admin"]


class EventsController(a.AdminController):
    EVENTS_IN_PAGE = 20

    async def apply(self, category=None):
        if not category:
            raise a.Redirect("choose_category")
        raise a.Redirect("events", category=category)

    @validate(category="int", page="int")
    async def get(self, category=0, page=1):
        categories = await self.application.events.list_categories(
            self.gamespace)

        events, pages = await self.application.events.list_paged_events(
            self.gamespace, EventsController.EVENTS_IN_PAGE, page,
            category_id=category)

        cats = {
            cat.category_id: cat.name
            for cat in categories
        }

        cats[0] = "< Select >"

        return {
            "events": events,
            "category": category,
            "categories": cats,
            "pages": pages
        }

    def render(self, data):
        tbl_rows = []

        for event in data["events"]:
            title = "unknown"
            description = "unknown"

            if "title" in event.data:
                title_object = event.data["title"]
                title = title_object.get("EN", title_object.get("en", "unknown"))
            elif "name" in event.data:
                title_object = event.data["name"]
                title = title_object.get("EN", title_object.get("en", "unknown"))

            if "description" in event.data:
                description_object = event.data["description"]
                description = description_object.get("EN", description_object.get("en", "unknown"))

            tbl_tr = {
                "edit": [a.link("event", event.item_id, icon="calendar", event_id=event.item_id)],
                "enabled": "yes" if event.enabled else "no",
                "tournament": "yes" + (" (clustered)" if event.clustered else "") if event.tournament else "no",
                "name": title[:32],
                "description": description[:32],
                "category": event.category,
                "dates": str(event.time_start) + " -<br> " + str(event.time_end),
                "controls": [a.button("event", "Delete", "danger", _method="delete", event_id=event.item_id)]
            }

            tbl_rows.append(tbl_tr)

        return [
            a.breadcrumbs([], "Events"),
            a.form(
                title="Filters",
                fields={
                    "category": a.field(
                        "Category", "select", "primary", values=data["categories"]
                    )
                },
                methods={
                    "apply": a.method("Apply", "primary")
                },
                data=data
            ),
            a.content("Events", [
                {
                    "id": "edit",
                    "title": "Edit"
                }, {
                    "id": "name",
                    "title": "Name"
                }, {
                    "id": "description",
                    "title": "Description"
                }, {
                    "id": "enabled",
                    "title": "Enabled"
                }, {
                    "id": "tournament",
                    "title": "Tournament"
                }, {
                    "id": "category",
                    "title": "Category"
                }, {
                    "id": "dates",
                    "title": "Dates"
                }, {
                    "id": "controls",
                    "title": "Controls"
                }], tbl_rows, "default"),
            a.pages(data["pages"]),
            a.links("Navigation", links=[
                a.link("choose_category", "Create new event", "plus",
                       category=self.context.get("category", "0")),
                a.link("categories", "Manage categories", "list-alt")
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]


class NewCategoryController(a.AdminController):
    async def create(self, scheme, category_name):
        try:
            scheme_data = ujson.loads(scheme)
        except (KeyError, ValueError):
            raise a.ActionError("Corrupted json")

        category_id = await self.application.events.create_category(self.gamespace, category_name, scheme_data)

        raise a.Redirect(
            "category",
            message="Category has been created",
            category_id=category_id)

    def render(self, data):
        return [
            a.breadcrumbs([
                a.link("events", "Events"),
                a.link("categories", "Categories")
            ], "New category"),
            a.form("Category template", fields={
                "scheme": a.field("scheme", "json", "primary"),
                "category_name": a.field("Category name (ID)", "text", "primary", "non-empty")
            }, methods={
                "create": a.method("Create", "primary"),
            }, data={"scheme": {}}),
            a.notice(
                "About templates",
                "Each category template has a common template shared across categories. "
                "Category template inherits a common template."
            ),
            a.links("Navigate", [
                a.link("categories", "Go back", icon="chevron-left"),
                a.link("common", "Edit common template", icon="flask"),
                a.link("events", "See events of this category", category=self.context.get("category_id")),
                a.link("https://spacetelescope.github.io/understanding-json-schema/index.html",
                       "See docs", icon="book")
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]


class NewEventController(a.AdminController):
    @validate(event_data="load_json_dict", start_dt="datetime", end_dt="datetime",
              enabled="bool", tournament="bool", clustered="bool", group="bool",
              end_action="str_name")
    async def create(self, event_data, start_dt, end_dt, enabled=False, tournament=False,
                     clustered=False, group=False, end_action=EventEndAction.NONE, **ignore):
        category_id = self.context.get("category")

        flags = EventFlags()

        if tournament:
            flags.set(EventFlags.TOURNAMENT)
        if clustered:
            flags.set(EventFlags.CLUSTERED)
        if group:
            flags.set(EventFlags.GROUP)

        end_action = EventEndAction(end_action)

        try:
            event_id = await self.application.events.create_event(
                self.gamespace, category_id, enabled, flags, event_data,
                start_dt, end_dt, end_action)
        except CategoryNotFound:
            raise a.ActionError("Category not found")

        raise a.Redirect(
            "event",
            message="Event has been created",
            event_id=event_id)

    @validate(category="int", clone="int")
    async def get(self, category, clone=None):
        events = self.application.events

        common_scheme = await events.get_common_scheme(self.gamespace)

        category = await events.get_category(self.gamespace, category)
        category_name = category.name
        category_scheme = category.scheme

        def update(d, u):
            for k, v in u.items():
                if isinstance(v, collections.abc.Mapping):
                    r = update(d.get(k, {}), v)
                    d[k] = r
                else:
                    d[k] = u[k]
            return d

        scheme = common_scheme.copy()
        update(scheme, category_scheme)

        event_data = None
        start_dt = None
        end_dt = None
        enabled = "true"
        tournament = "false"
        clustered = "false"
        group = "false"
        end_action = EventEndAction.NONE

        if clone:
            try:
                event = await events.get_event(self.gamespace, clone)
            except EventNotFound:
                raise a.ActionError("Event was not found.")

            event_data = event.data
            enabled = "true" if event.enabled else "false"
            tournament = "true" if event.tournament else "false"
            clustered = "true" if event.clustered else "false"
            group = "true" if event.group else "false"
            start_dt = str(event.time_start)
            end_dt = str(event.time_end)
            end_action = str(event.end_action)

        return {
            "scheme": scheme,
            "enabled": enabled,
            "tournament": tournament,
            "clustered": clustered,
            "group": group,
            "category_name": category_name,
            "event_data": event_data,
            "start_dt": start_dt,
            "end_dt": end_dt,
            "end_action": end_action
        }

    def render(self, data):
        category = self.context.get("category")

        return [
            a.breadcrumbs([
                a.link("events", "Events", category=category),
            ], "New event"),
            a.form(
                title="New event (of category " + data.get("category_name") + ")",
                fields={
                    "event_data": a.field(
                        "Event properties", "dorn", "primary",
                        schema=data["scheme"], order=8
                    ),
                    "enabled": a.field("Is event enabled", "switch", "primary", order=3),
                    "tournament": a.field("Is tournament enabled (e.g. players will be ranked)",
                                          "switch", "primary", order=4),
                    "clustered": a.field("Is tournament's leaderboard clustered", "switch", "primary",
                                         order=5, description="Cannot be changed later"),
                    "group": a.field("Is event group-based", "switch", "primary",
                                     order=6, description="Cannot be changed later"),
                    "end_action": a.field("Action Once Event Is Complete", "select", "primary", order=7, values={
                        EventEndAction.NONE: "Do nothing",
                        EventEndAction.MESSAGE: "Send Message",
                        EventEndAction.EXEC: "Call Exec Function"
                    }, description=EVENT_END_ACTION_DESCRIPTION),
                    "start_dt": a.field("Start date", "date", "primary", "non-empty", order=1),
                    "end_dt": a.field("End date", "date", "primary", "non-empty", order=2)
                },
                methods={
                    "create": a.method("Create", "primary")
                },
                data=data
            ),
            a.links("Navigate", [
                a.link("events", "Go back", icon="chevron-left", category=category),
                a.link("category", "Edit category", icon="list-alt", category_id=category)
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]


class RootAdminController(a.AdminController):
    def render(self, data):
        return [
            a.links("Events service", [
                a.link("events", "Edit events", icon="wrench")
            ])
        ]

    def access_scopes(self):
        return ["event_admin"]
36.392857
124
0.5294
2,457
24,456
5.171754
0.111518
0.027701
0.033997
0.035256
0.65279
0.592744
0.551507
0.522625
0.466121
0.413866
0
0.001618
0.342738
24,456
671
125
36.447094
0.788914
0
0
0.509158
0
0.016484
0.236639
0.018851
0
0
0
0
0
1
0.034799
false
0
0.009158
0.027473
0.10989
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd1d74e5ac367e134c8e0a19a4b10cfe4ee5fb88
15,704
py
Python
main.py
opt12/gym-jsbsim-eee
fa61d0d4679fd65b5736fc562fe268714b4e08d8
[ "MIT" ]
7
2020-11-10T07:33:40.000Z
2021-06-23T07:25:43.000Z
main.py
opt12/gym-jsbsim-eee
fa61d0d4679fd65b5736fc562fe268714b4e08d8
[ "MIT" ]
null
null
null
main.py
opt12/gym-jsbsim-eee
fa61d0d4679fd65b5736fc562fe268714b4e08d8
[ "MIT" ]
5
2020-07-12T00:10:59.000Z
2021-06-22T09:13:13.000Z
import sys, os
# sys.path.append(os.path.join(os.path.dirname(__file__)))  # TODO: Is this a good idea? Dunno! It works!
# print(os.path.join(os.path.dirname(__file__)))
import argparse

import markov_pilot.environment.properties as prp
from markov_pilot.environment.environment import NoFGJsbSimEnv_multi, JsbSimEnv_multi
from markov_pilot.wrappers.episodePlotterWrapper import EpisodePlotterWrapper_multi
from markov_pilot.wrappers.varySetpointsWrapper import VarySetpointsWrapper
from markov_pilot.tasks.tasks import SingleChannel_FlightTask, SingleChannel_MinimumProps_Task
from reward_funcs import _make_base_reward_components, make_angular_integral_reward_components, make_sideslip_angle_reward_components
from markov_pilot.agents.AgentTrainer import DDPG_AgentTrainer, PID_AgentTrainer, PidParameters, MADDPG_AgentTrainer
from markov_pilot.agents.agent_container import AgentContainer, AgentSpec
from markov_pilot.agents.train import perform_training
from markov_pilot.helper.lab_journal import LabJournal
from markov_pilot.helper.load_store import restore_agent_container_from_journal, restore_env_from_journal, save_test_run
from markov_pilot.testbed.evaluate_training import evaluate_training

## define the initial setpoints
target_path_angle_gamma_deg = -6.5
target_kias = 92
target_roll_angle_phi_deg = -15
target_sideslip_angle_beta_deg = 0


def parse_args():  # used https://github.com/openai/maddpg/ as a basis
    parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
    # Environment
    parser.add_argument("--max-episode-len-sec", type=int, default=120,
                        help="maximum episode length in seconds (steps = seconds*interaction frequ.)")
    parser.add_argument("--num-steps", type=int, default=30000,
                        help="number of training steps to perform")
    parser.add_argument("--interaction-frequency", type=float, default=5,
                        help="frequency of agent interactions with the environment")
    # Core training parameters
    parser.add_argument("--lr_actor", type=float, default=1e-4,
                        help="learning rate for the actor training Adam optimizer")
    parser.add_argument("--lr_critic", type=float, default=1e-3,
                        help="learning rate for the critic training Adam optimizer")
    parser.add_argument("--tau", type=float, default=1e-3,
                        help="target network adaptation factor")
    parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
    parser.add_argument("--batch-size", type=int, default=64,
                        help="number of episodes to optimize at the same time")
    parser.add_argument("--replay-size", type=int, default=1000000,
                        help="size of the replay buffer")
    # Checkpointing
    parser.add_argument("--exp-name", type=str, default='Default_Experiment',
                        help="name of the experiment")
    parser.add_argument("--save-dir", type=str, default="./tmp/policy/",
                        help="directory in which training state and model should be saved")
    parser.add_argument("--save-rate", type=int, default=1000,
                        help="save model once every time this many episodes are completed")
    parser.add_argument("--load-dir", type=str, default="",
                        help="directory in which training state and model are loaded")
    # Evaluation
    parser.add_argument("--restore", nargs='+', type=int, default=False)
    # to restore agents and env from lab-journal lines given as a list and continue training
    parser.add_argument("--play", nargs='+', type=int, default=False)
    # to play with agents and env restored from lab-journal lines
    parser.add_argument("--best", type=bool, default=False)
    # TODO: when given, the first line from restore or play will be used to restore the
    # environment, and the best agents for that run will be loaded
    parser.add_argument("--flightgear", type=bool, default=False)
    # TODO: when given together with --play [lines], the environment will be replaced with the
    # FlightGear-enabled one and the player will render to FlightGear
    parser.add_argument("--testing-iters", type=int, default=2000,
                        help="number of steps before running a performance test")
    parser.add_argument("--plots-dir", type=str, default="./learning_curves/",
                        help="directory where plot data is saved")
    parser.add_argument("--base-dir", type=str, default="./",
                        help="directory the test_run date is saved")
    return parser.parse_args()


def setup_env(arglist) -> NoFGJsbSimEnv_multi:
    agent_interaction_freq = arglist.interaction_frequency
    episode_time_s = arglist.max_episode_len_sec

    ## define the initial conditions
    initial_path_angle_gamma_deg = target_path_angle_gamma_deg + 3
    initial_roll_angle_phi_deg = target_roll_angle_phi_deg + 10
    initial_sideslip_angle_beta_deg = 0
    initial_fwd_speed_KAS = 80
    initial_aoa_deg = 1.0
    initial_altitude_ft = 6000

    elevator_AT_for_PID = SingleChannel_FlightTask(
        'elevator', prp.elevator_cmd,
        {prp.flight_path_deg: target_path_angle_gamma_deg},
        make_base_reward_components=_make_base_reward_components,  # pass this in here, as otherwise the restore from disk gets nifty
        integral_limit=100)
    # integral_limit: self.Ki * dt * int <= output_limit --> int <= 1/0.2*6.5e-2 = 77

    aileron_AT_for_PID = SingleChannel_FlightTask(
        'aileron', prp.aileron_cmd,
        {prp.roll_deg: initial_roll_angle_phi_deg},
        make_base_reward_components=_make_base_reward_components,  # pass this in here, as otherwise the restore from disk gets nifty
        integral_limit=100)
    # integral_limit: self.Ki * dt * int <= output_limit --> int <= 1/0.2*1e-2 = 500

    rudder_AT_for_PID = SingleChannel_FlightTask(
        'rudder', prp.rudder_cmd,
        {prp.sideslip_deg: 0}, max_allowed_error=10,
        make_base_reward_components=_make_base_reward_components,  # pass this in here, as otherwise the restore from disk gets nifty
        integral_limit=100)
    # integral_limit: self.Ki * dt * int <= output_limit --> int <= 1/0.2*1e-2 = 500

    coop_flight_path_task = SingleChannel_FlightTask(
        'flight_path_angle', prp.elevator_cmd,
        {prp.flight_path_deg: target_path_angle_gamma_deg},
        presented_state=[prp.q_radps, prp.indicated_airspeed,
                         prp.elevator_cmd, prp.rudder_cmd, prp.aileron_cmd],
        max_allowed_error=30,
        make_base_reward_components=make_angular_integral_reward_components,
        integral_limit=0.25)

    coop_banking_task = SingleChannel_FlightTask(
        'banking_angle', prp.aileron_cmd,
        {prp.roll_deg: target_roll_angle_phi_deg},
        presented_state=[prp.p_radps, prp.indicated_airspeed,
                         prp.aileron_cmd, prp.elevator_cmd, prp.aileron_cmd],
        max_allowed_error=60,
        make_base_reward_components=make_angular_integral_reward_components,
        integral_limit=0.25)

    coop_sideslip_task = SingleChannel_FlightTask(
        'sideslip_angle', prp.rudder_cmd,
        {prp.sideslip_deg: target_sideslip_angle_beta_deg},
        presented_state=[prp.r_radps, prp.indicated_airspeed,
                         prp.rudder_cmd, prp.aileron_cmd, prp.elevator_cmd,
                         coop_banking_task.setpoint_value_props[0],
                         coop_banking_task.setpoint_props[0]],  # TODO: this relies on defining coop_banking_task before coop_sideslip_task :-(
        max_allowed_error=30,
        make_base_reward_components=make_sideslip_angle_reward_components,
        integral_limit=0.25)

    task_list = [coop_flight_path_task, coop_banking_task, coop_sideslip_task]

    env = NoFGJsbSimEnv_multi(task_list, agent_interaction_freq=agent_interaction_freq,
                              episode_time_s=episode_time_s)
    env = EpisodePlotterWrapper_multi(env, output_props=[prp.sideslip_deg])

    env.set_initial_conditions({
        prp.initial_u_fps: 1.6878099110965 * initial_fwd_speed_KAS,
        prp.initial_flight_path_deg: initial_path_angle_gamma_deg,
        prp.initial_roll_deg: initial_roll_angle_phi_deg,
        prp.initial_aoa_deg: initial_aoa_deg,
        prp.initial_altitude_ft: initial_altitude_ft,
    })  # just an example, sane defaults are already set in the env.__init__() constructor

    env.set_meta_information(experiment_name=arglist.exp_name)
    return env


def setup_container(task_list, arglist):
    agent_classes_dict = {
        'PID': PID_AgentTrainer,
        'MADDPG': MADDPG_AgentTrainer,
        'DDPG': DDPG_AgentTrainer,
    }

    # for PID controllers we need an elaborated parameter set for each type
    pid_params = {
        'aileron': PidParameters(3.5e-2, 1e-2, 0.0),
        'elevator': PidParameters(-5e-2, -6.5e-2, -1e-3),
        'rudder': PidParameters(0, 0, 0),  # TODO: this parameter set just leaves the rudder alone; no actuation at all
    }

    params_aileron_pid_agent = {
        'pid_params': pid_params['aileron'],
        'writer': None,
    }

    params_elevator_pid_agent = {
        'pid_params': pid_params['elevator'],
        'writer': None,
    }

    params_rudder_pid_agent = {
        'pid_params': pid_params['rudder'],
        'writer': None,
    }

    # for the learning agents, a standard parameter set will do; the details will be learned
    params_DDPG_MADDPG_agent = {
        **vars(arglist),
        'layer1_size': 400,
        'layer2_size': 300,
        'writer': None,
    }

    # for the learning agents, a standard parameter set will do; the details will be learned
    params_DDPG_MADDPG_agent_big_net = {
        **vars(arglist),
        'layer1_size': 1200,
        'layer2_size': 900,
        'writer': None,
    }

    agent_spec_aileron_PID = AgentSpec('aileron', 'PID', ['banking_angle'], params_aileron_pid_agent)
    agent_spec_aileron_DDPG = AgentSpec('aileron', 'DDPG', ['banking_angle'], params_DDPG_MADDPG_agent)
    agent_spec_aileron_MADDPG = AgentSpec('aileron', 'MADDPG', ['banking_angle'], params_DDPG_MADDPG_agent)

    agent_spec_elevator_PID = AgentSpec('elevator', 'PID', ['flight_path_angle'], params_elevator_pid_agent)
    agent_spec_elevator_DDPG = AgentSpec('elevator', 'DDPG', ['flight_path_angle'], params_DDPG_MADDPG_agent)
    agent_spec_elevator_MADDPG = AgentSpec('elevator', 'MADDPG', ['flight_path_angle'], params_DDPG_MADDPG_agent)

    agent_spec_rudder_MADDPG = AgentSpec('rudder', 'MADDPG', ['sideslip_angle'], params_DDPG_MADDPG_agent_big_net)
    agent_spec_rudder_DDPG = AgentSpec('rudder', 'DDPG', ['sideslip_angle'], params_DDPG_MADDPG_agent)
    agent_spec_rudder_PID = AgentSpec('rudder', 'PID', ['sideslip_angle'], params_rudder_pid_agent)

    # # this is an example of how an assignment of one agent to multiple tasks could look.
    # # It is assumed that the glidepath task is split into two subtasks: one to control the
    # # elevator, the other to monitor the glide angle set-point. Following this scheme,
    # # e.g. combined speed control and glide path angle tasks could be defined to control
    # # elevator and thrust.
    # params_DDPG_MADDPG_separated_agent = {
    #     **vars(arglist),
    #     'layer1_size': 400,
    #     'layer2_size': 300,
    #     'task_reward_weights': [2, 14],
    #     'writer': None,
    # }
    # # attention, the tasks are currently undefined in setup_env()
    # agent_spec_glide_path_MADDPG_separated_tasks = AgentSpec('elevator', 'MADDPG', ['elevator_actuation_task', 'glide_path_task'], params_DDPG_MADDPG_separated_agent)

    # the agent spec to train elevator and aileron control in one single agent (failed)
    # agent_spec_elevator_aileron_DDPG = AgentSpec('elevator_aileron', 'DDPG', ['flight_path_angle', 'banking_angle'], params_DDPG_MADDPG_agent)

    # the agent spec to train elevator, aileron and rudder control in one single agent (failed)
    # agent_spec_elevator_aileron_rudder_MADDPG = AgentSpec('ele_ail_rud', 'DDPG', ['flight_path_angle', 'banking_angle', 'sideslip_angle'], params_DDPG_MADDPG_agent_big_net)

    # Here we specify which agents shall be initiated; choose from the single-specs defined above
    # agent_spec = [agent_spec_elevator_MADDPG, agent_spec_aileron_MADDPG, agent_spec_rudder_MADDPG]
    # agent_spec = [agent_spec_elevator_aileron_DDPG]
    # agent_spec = [agent_spec_elevator_PID, agent_spec_aileron_PID, agent_spec_rudder_DDPG]

    # the best controller was yielded by training three cooperating DDPG agents
    agent_spec = [agent_spec_elevator_DDPG, agent_spec_aileron_DDPG, agent_spec_rudder_DDPG]

    task_list_n = task_list  # we only need the task list to create the mapping; anything else from the env is not interesting for the agent container

    agent_container = AgentContainer.init_from_specs(task_list_n, agent_spec, agent_classes_dict, **vars(arglist))

    return agent_container


if __name__ == '__main__':
    arglist = parse_args()

    lab_journal = LabJournal(arglist.base_dir, arglist)

    # # uncomment the following lines when trying to restore from disk
    # restore_lines = [3463, 3488, 3489]
    # testing_env = restore_env_from_journal(lab_journal, restore_lines[0])
    # # if needed, change to the FlightGear-enabled environment
    # # testing_env = restore_env_from_journal(lab_journal, restore_lines[0], target_environment='FG')
    # # alternatively, use setup_env() to create a new testing_env
    # # testing_env = setup_env(arglist)

    # # if needed, apply the VarySetpointsWrapper to see wild action:
    # # testing_env = VarySetpointsWrapper(testing_env, prp.roll_deg, (-30, 30), (10, 120), (5, 30), (0.05, 0.1))
    # # testing_env = VarySetpointsWrapper(testing_env, prp.flight_path_deg, (-9, -5.5), (10, 120), (5, 30), (0.05, 0.1))

    # agent_container = restore_agent_container_from_journal(lab_journal, restore_lines)
    # # normally, we don't save the test runs restored from disk
    # # save_test_run(testing_env, agent_container, lab_journal, arglist)  # use the testing_env here to have the save_path available in the evaluation
    # evaluate_training(agent_container, testing_env, lab_journal=lab_journal)  # run the standardized test on the test_env
    # # if FlightGear rendering is desired, use this alternative
    # # evaluate_training(agent_container, testing_env, lab_journal=None, render_mode='flightgear')  # run the standardized test on the test_env
    # # when restoring from disk, exit now.
    # exit(0)

    training_env = setup_env(arglist)
    testing_env = setup_env(arglist)

    # apply VarySetpoints to the training to increase the variance of the training data
    training_env = VarySetpointsWrapper(training_env, prp.roll_deg, (-30, 30), (10, 30), (5, 30), (0.05, 0.5))
    training_env = VarySetpointsWrapper(training_env, prp.flight_path_deg, (-10, -5.5), (10, 45), (5, 30), (0.05, 0.5))
    training_env = VarySetpointsWrapper(training_env, prp.sideslip_deg, (-2, 2), (10, 45), (5, 30), (0.05, 0.5))

    agent_container = setup_container(training_env.task_list, arglist)

    save_test_run(testing_env, agent_container, lab_journal, arglist)  # use the testing_env here to have the save_path available in the evaluation

    perform_training(training_env, testing_env, agent_container, lab_journal, arglist)

    training_env.close()
    testing_env.close()
59.037594
219
0.707972
2,071
15,704
5.061806
0.201352
0.024897
0.032433
0.022894
0.409425
0.338167
0.26252
0.228084
0.178575
0.142898
0
0.020727
0.201223
15,704
265
220
59.260377
0.814971
0.310303
0
0.136054
0
0
0.139292
0.004097
0
0
0
0.003774
0
1
0.020408
false
0
0.095238
0
0.136054
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd1d8b232aa33e6da7911055afde86063303f3d6
19,781
py
Python
atm/core.py
HDI-Project/ATM
dde454a95e963a460843a61bbb44d18982984b17
[ "MIT" ]
554
2017-12-19T06:43:11.000Z
2022-03-26T04:24:55.000Z
atm/core.py
BTHUNTERCN/ATM
dde454a95e963a460843a61bbb44d18982984b17
[ "MIT" ]
128
2017-12-19T21:30:32.000Z
2021-04-19T17:03:39.000Z
atm/core.py
BTHUNTERCN/ATM
dde454a95e963a460843a61bbb44d18982984b17
[ "MIT" ]
140
2017-12-20T03:47:04.000Z
2022-03-17T01:50:24.000Z
# -*- coding: utf-8 -*-

"""Core ATM module.

This module contains the ATM class, which is the one responsible for
executing and orchestrating the main ATM functionalities.
"""

import logging
import random
import time
from datetime import datetime, timedelta
from operator import attrgetter

from tqdm import tqdm

from atm.constants import TIME_FMT, PartitionStatus, RunStatus
from atm.database import Database
from atm.method import Method
from atm.worker import ClassifierError, Worker

LOGGER = logging.getLogger(__name__)


class ATM(object):

    _LOOP_WAIT = 5

    def __init__(
        self,

        # SQL Conf
        dialect='sqlite',
        database='atm.db',
        username=None,
        password=None,
        host=None,
        port=None,
        query=None,

        # AWS Conf
        access_key=None,
        secret_key=None,
        s3_bucket=None,
        s3_folder=None,

        # Log Conf
        models_dir='models',
        metrics_dir='metrics',
        verbose_metrics=False,
    ):
        self.db = Database(dialect, database, username, password, host, port, query)
        self.aws_access_key = access_key
        self.aws_secret_key = secret_key
        self.s3_bucket = s3_bucket
        self.s3_folder = s3_folder
        self.models_dir = models_dir
        self.metrics_dir = metrics_dir
        self.verbose_metrics = verbose_metrics

    def add_dataset(self, train_path, test_path=None, name=None,
                    description=None, class_column=None):
        """Add a new dataset to the Database.

        Args:
            train_path (str):
                Path to the training CSV file. It can be a local filesystem path,
                absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
                format ``s3://{bucket_name}/{key}``. Required.
            test_path (str):
                Path to the testing CSV file. It can be a local filesystem path,
                absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
                format ``s3://{bucket_name}/{key}``.
                Optional. If not given, the training CSV will be split in two parts,
                train and test.
            name (str):
                Name given to this dataset. Optional. If not given, a hash will be
                generated from the training_path and used as the Dataset name.
            description (str):
                Human friendly description of the Dataset. Optional.
            class_column (str):
                Name of the column that will be used as the target variable.
                Optional. Defaults to ``'class'``.

        Returns:
            Dataset:
                The created dataset.
        """
        return self.db.create_dataset(
            train_path=train_path,
            test_path=test_path,
            name=name,
            description=description,
            class_column=class_column,
            aws_access_key=self.aws_access_key,
            aws_secret_key=self.aws_secret_key,
        )

    def add_datarun(self, dataset_id, budget=100, budget_type='classifier',
                    gridding=0, k_window=3, metric='f1',
                    methods=['logreg', 'dt', 'knn'], r_minimum=2,
                    run_per_partition=False, score_target='cv', priority=1,
                    selector='uniform', tuner='uniform', deadline=None):
        """Register one or more Dataruns to the Database.

        The methods hyperparameters will be analyzed and Hyperpartitions generated
        from them.
        If ``run_per_partition`` is ``True``, one Datarun will be created for each
        Hyperpartition. Otherwise, a single one will be created for all of them.

        Args:
            dataset_id (int):
                Id of the Dataset which this Datarun will belong to.
            budget (int):
                Budget amount. Optional. Defaults to ``100``.
            budget_type (str):
                Budget Type. Can be 'classifier' or 'walltime'.
                Optional. Defaults to ``'classifier'``.
            gridding (int):
                ``gridding`` setting for the Tuner. Optional. Defaults to ``0``.
            k_window (int):
                ``k`` setting for the Selector. Optional. Defaults to ``3``.
            metric (str):
                Metric to use for the tuning and selection. Optional.
                Defaults to ``'f1'``.
            methods (list):
                List of methods to try. Optional.
                Defaults to ``['logreg', 'dt', 'knn']``.
            r_minimum (int):
                ``r_minimum`` setting for the Tuner. Optional. Defaults to ``2``.
            run_per_partition (bool):
                whether to create a separated Datarun for each Hyperpartition or not.
                Optional. Defaults to ``False``.
            score_target (str):
                Which score to use for the tuning and selection process. It can be
                ``'cv'`` or ``'test'``. Optional. Defaults to ``'cv'``.
            priority (int):
                Priority of this Datarun. The higher the better.
                Optional. Defaults to ``1``.
            selector (str):
                Type of selector to use. Optional. Defaults to ``'uniform'``.
            tuner (str):
                Type of tuner to use. Optional. Defaults to ``'uniform'``.
            deadline (str):
                Time deadline. It must be a string representing a datetime in the
                format ``'%Y-%m-%d %H:%M'``. If given, ``budget_type`` will be set
                to ``'walltime'``.

        Returns:
            Datarun:
                The created Datarun or list of Dataruns.
        """
        if deadline:
            deadline = datetime.strptime(deadline, TIME_FMT)
            budget_type = 'walltime'
        elif budget_type == 'walltime':
            deadline = datetime.now() + timedelta(minutes=budget)

        run_description = '___'.join([tuner, selector])
        target = score_target + '_judgment_metric'

        method_parts = {}
        for method in methods:
            # enumerate all combinations of categorical variables for this method
            method_instance = Method(method)
            method_parts[method] = method_instance.get_hyperpartitions()

            LOGGER.info('method {} has {} hyperpartitions'.format(
                method, len(method_parts[method])))

        dataruns = list()
        if not run_per_partition:
            datarun = self.db.create_datarun(
                dataset_id=dataset_id,
                description=run_description,
                tuner=tuner,
                selector=selector,
                gridding=gridding,
                priority=priority,
                budget_type=budget_type,
                budget=budget,
                deadline=deadline,
                metric=metric,
                score_target=target,
                k_window=k_window,
                r_minimum=r_minimum
            )
            dataruns.append(datarun)

        for method, parts in method_parts.items():
            for part in parts:
                # if necessary, create a new datarun for each hyperpartition.
                # This setting is useful for debugging.
                if run_per_partition:
                    datarun = self.db.create_datarun(
                        dataset_id=dataset_id,
                        description=run_description,
                        tuner=tuner,
                        selector=selector,
                        gridding=gridding,
                        priority=priority,
                        budget_type=budget_type,
                        budget=budget,
                        deadline=deadline,
                        metric=metric,
                        score_target=target,
                        k_window=k_window,
                        r_minimum=r_minimum
                    )
                    dataruns.append(datarun)

                # create a new hyperpartition in the database
                self.db.create_hyperpartition(datarun_id=datarun.id,
                                              method=method,
                                              tunables=part.tunables,
                                              constants=part.constants,
                                              categoricals=part.categoricals,
                                              status=PartitionStatus.INCOMPLETE)

        dataset = self.db.get_dataset(dataset_id)
        LOGGER.info('Dataruns created. Summary:')
        LOGGER.info('\tDataset ID: {}'.format(dataset.id))
        LOGGER.info('\tTraining data: {}'.format(dataset.train_path))
        LOGGER.info('\tTest data: {}'.format(dataset.test_path))

        if run_per_partition:
            LOGGER.info('\tDatarun IDs: {}'.format(
                ', '.join(str(datarun.id) for datarun in dataruns)))

        else:
            LOGGER.info('\tDatarun ID: {}'.format(dataruns[0].id))

        LOGGER.info('\tHyperpartition selection strategy: {}'.format(dataruns[0].selector))
        LOGGER.info('\tParameter tuning strategy: {}'.format(dataruns[0].tuner))
        LOGGER.info('\tBudget: {} ({})'.format(dataruns[0].budget, dataruns[0].budget_type))

        return dataruns if run_per_partition else dataruns[0]

    def work(self, datarun_ids=None, save_files=True, choose_randomly=True,
             cloud_mode=False, total_time=None, wait=True, verbose=False):
        """Get unfinished Dataruns from the database and work on them.

        Check the ModelHub Database for unfinished Dataruns, and work on them
        as they are added. This process will continue to run until it exceeds
        total_time or there are no more Dataruns to process or it is killed.

        Args:
            datarun_ids (list):
                list of IDs of Dataruns to work on. If ``None``, this will work on
                any unfinished Dataruns found in the database. Optional.
                Defaults to ``None``.
            save_files (bool):
                Whether to save the fitted classifiers and their metrics or not.
                Optional. Defaults to True.
            choose_randomly (bool):
                If ``True``, work on all the highest-priority dataruns in random
                order. Otherwise, work on them in sequential order (by ID).
                Optional. Defaults to ``True``.
            cloud_mode (bool):
                Save the models and metrics in AWS S3 instead of locally. This
                option works only if S3 configuration has been provided on
                initialization. Optional. Defaults to ``False``.
            total_time (int):
                Total time to run the work process, in seconds. If ``None``,
                continue to run until interrupted or there are no more Dataruns to
                process. Optional. Defaults to ``None``.
            wait (bool):
                If ``True``, wait for more Dataruns to be inserted into the Database
                once all have been processed. Otherwise, exit the worker loop when
                they run out. Optional. Defaults to ``True``.
            verbose (bool):
                Whether to be verbose about the process. Optional.
                Defaults to ``False``.
        """
        start_time = datetime.now()

        # main loop
        while True:
            # get all pending and running dataruns, or all pending/running dataruns
            # from the list we were given
            dataruns = self.db.get_dataruns(include_ids=datarun_ids, ignore_complete=True)
            if not dataruns:
                if wait:
                    LOGGER.debug('No dataruns found. Sleeping %d seconds and trying again.',
                                 self._LOOP_WAIT)
                    time.sleep(self._LOOP_WAIT)
                    continue

                else:
                    LOGGER.info('No dataruns found. Exiting.')
                    break

            # either choose a run randomly between priority, or take the run with the lowest ID
            if choose_randomly:
                run = random.choice(dataruns)
            else:
                run = sorted(dataruns, key=attrgetter('id'))[0]

            # say we've started working on this datarun, if we haven't already
            self.db.mark_datarun_running(run.id)

            LOGGER.info('Computing on datarun %d' % run.id)
            # actual work happens here
            worker = Worker(self.db, run, save_files=save_files,
                            cloud_mode=cloud_mode, aws_access_key=self.aws_access_key,
                            aws_secret_key=self.aws_secret_key, s3_bucket=self.s3_bucket,
                            s3_folder=self.s3_folder, models_dir=self.models_dir,
                            metrics_dir=self.metrics_dir, verbose_metrics=self.verbose_metrics)
            try:
                if run.budget_type == 'classifier':
                    pbar = tqdm(
                        total=run.budget,
                        ascii=True,
                        initial=run.completed_classifiers,
                        disable=not verbose
                    )

                    while run.status != RunStatus.COMPLETE:
                        worker.run_classifier()
                        run = self.db.get_datarun(run.id)
                        if verbose and run.completed_classifiers > pbar.last_print_n:
                            pbar.update(run.completed_classifiers - pbar.last_print_n)

                    pbar.close()

                elif run.budget_type == 'walltime':
                    pbar = tqdm(
                        disable=not verbose,
                        ascii=True,
                        initial=run.completed_classifiers,
                        unit=' Classifiers'
                    )

                    while run.status != RunStatus.COMPLETE:
                        worker.run_classifier()
                        run = self.db.get_datarun(run.id)  # Refresh the datarun object.
                        if verbose and run.completed_classifiers > pbar.last_print_n:
                            pbar.update(run.completed_classifiers - pbar.last_print_n)

                    pbar.close()

            except ClassifierError:
                # the exception has already been handled; just wait a sec so we
                # don't go out of control reporting errors
                LOGGER.error('Something went wrong. Sleeping %d seconds.', self._LOOP_WAIT)
                time.sleep(self._LOOP_WAIT)

            elapsed_time = (datetime.now() - start_time).total_seconds()
            if total_time is not None and elapsed_time >= total_time:
                LOGGER.info('Total run time for worker exceeded; exiting.')
                break

    def run(self, train_path, test_path=None, name=None, description=None,
            class_column='class', budget=100, budget_type='classifier', gridding=0,
            k_window=3, metric='f1', methods=['logreg', 'dt', 'knn'], r_minimum=2,
            run_per_partition=False, score_target='cv', selector='uniform',
            tuner='uniform', deadline=None, priority=1, save_files=True,
            choose_randomly=True, cloud_mode=False, total_time=None, verbose=True):
        """Create a Dataset and a Datarun and then work on it.

        Args:
            train_path (str):
                Path to the training CSV file. It can be a local filesystem path,
                absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
                format ``s3://{bucket_name}/{key}``. Required.
            test_path (str):
                Path to the testing CSV file. It can be a local filesystem path,
                absolute or relative, or an HTTP or HTTPS URL, or an S3 path in the
                format ``s3://{bucket_name}/{key}``.
                Optional. If not given, the training CSV will be split in two parts,
                train and test.
            name (str):
                Name given to this dataset. Optional. If not given, a hash will be
                generated from the training_path and used as the Dataset name.
            description (str):
                Human friendly description of the Dataset. Optional.
            class_column (str):
                Name of the column that will be used as the target variable.
                Optional. Defaults to ``'class'``.
            budget (int):
                Budget amount. Optional. Defaults to ``100``.
            budget_type (str):
                Budget Type. Can be 'classifier' or 'walltime'.
                Optional. Defaults to ``'classifier'``.
            gridding (int):
                ``gridding`` setting for the Tuner. Optional. Defaults to ``0``.
            k_window (int):
                ``k`` setting for the Selector. Optional. Defaults to ``3``.
            metric (str):
                Metric to use for the tuning and selection. Optional.
                Defaults to ``'f1'``.
            methods (list):
                List of methods to try. Optional.
                Defaults to ``['logreg', 'dt', 'knn']``.
            r_minimum (int):
                ``r_minimum`` setting for the Tuner. Optional. Defaults to ``2``.
            run_per_partition (bool):
                whether to create a separated Datarun for each Hyperpartition or not.
                Optional. Defaults to ``False``.
            score_target (str):
                Which score to use for the tuning and selection process. It can be
                ``'cv'`` or ``'test'``. Optional. Defaults to ``'cv'``.
            priority (int):
                Priority of this Datarun. The higher the better.
                Optional. Defaults to ``1``.
            selector (str):
                Type of selector to use. Optional. Defaults to ``'uniform'``.
            tuner (str):
                Type of tuner to use. Optional. Defaults to ``'uniform'``.
            deadline (str):
                Time deadline. It must be a string representing a datetime in the
                format ``'%Y-%m-%d %H:%M'``. If given, ``budget_type`` will be set
                to ``'walltime'``.
            verbose (bool):
                Whether to be verbose about the process. Optional.
                Defaults to ``True``.

        Returns:
            Datarun:
                The created Datarun or list of Dataruns.
        """
        dataset = self.add_dataset(train_path, test_path, name, description, class_column)
        datarun = self.add_datarun(
            dataset.id,
            budget,
            budget_type,
            gridding,
            k_window,
            metric,
            methods,
            r_minimum,
            run_per_partition,
            score_target,
            priority,
            selector,
            tuner,
            deadline
        )

        if run_per_partition:
            datarun_ids = [_datarun.id for _datarun in datarun]
        else:
            datarun_ids = [datarun.id]

        if verbose:
            print('Processing dataset {}'.format(train_path))

        self.work(
            datarun_ids,
            save_files,
            choose_randomly,
            cloud_mode,
            total_time,
            False,  # wait=False: exit the worker loop when these dataruns finish
            verbose=verbose
        )

        dataruns = self.db.get_dataruns(
            include_ids=datarun_ids,
            ignore_complete=False,
            ignore_pending=True
        )
        if run_per_partition:
            return dataruns

        elif len(dataruns) == 1:
            return dataruns[0]

    def load_model(self, classifier_id):
        """Load a Model from the Database.

        Args:
            classifier_id (int):
                Id of the Model to load.

        Returns:
            Model:
                The loaded model instance.
        """
        return self.db.get_classifier(classifier_id).load_model()
40.954451
95
0.547495
2,186
19,781
4.830741
0.16011
0.051515
0.057955
0.009659
0.492045
0.479356
0.464583
0.464583
0.452083
0.443182
0
0.005054
0.36985
19,781
482
96
41.039419
0.842118
0.408473
0
0.25974
0
0
0.059423
0
0
0
0
0
0
1
0.025974
false
0.004329
0.04329
0
0.099567
0.021645
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd225009cbeb540acf88e600f37e2294b3fa16ce
742
py
Python
dbcollection/datasets/leeds_sports_pose/leeds_sports_pose/__init__.py
dbcollection/dbcollection
a36f57a11bc2636992e26bba4406914162773dd9
[ "MIT" ]
23
2017-09-20T19:23:26.000Z
2022-01-09T16:18:11.000Z
dbcollection/datasets/leeds_sports_pose/leeds_sports_pose/__init__.py
dbcollection/dbcollection
a36f57a11bc2636992e26bba4406914162773dd9
[ "MIT" ]
148
2017-07-23T14:28:28.000Z
2022-01-13T00:35:17.000Z
dbcollection/datasets/leeds_sports_pose/leeds_sports_pose/__init__.py
dbcollection/dbcollection
a36f57a11bc2636992e26bba4406914162773dd9
[ "MIT" ]
6
2018-01-12T15:47:57.000Z
2021-02-09T06:32:39.000Z
""" Leeds Sports Pose (LSP) Dataset download/process functions. """ from dbcollection.datasets import BaseDataset from .keypoints import Keypoints, KeypointsOriginal urls = ( 'http://sam.johnson.io/research/lsp_dataset_original.zip', { 'url': 'http://sam.johnson.io/research/lsp_dataset.zip', 'extract_dir': 'lsp_dataset', }, ) keywords = ('image_processing', 'detection', 'human_pose', 'keypoints') tasks = { "keypoints": Keypoints, "keypoints_original": KeypointsOriginal, } default_task = 'keypoints' class Dataset(BaseDataset): """Leeds Sports Pose (LSP) Dataset preprocessing/downloading functions.""" urls = urls keywords = keywords tasks = tasks default_task = default_task
24.733333
78
0.699461
78
742
6.512821
0.474359
0.098425
0.059055
0.070866
0.232283
0.133858
0.133858
0
0
0
0
0
0.172507
742
29
79
25.586207
0.827362
0.172507
0
0
0
0
0.342762
0
0
0
0
0
0
1
0
false
0
0.1
0
0.35
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd25018110a4f497d278f0c5fcc41f39296d2cf6
3,505
py
Python
flydra_analysis/flydra_analysis/a2/check_mainbrain_h5_contiguity.py
elhananby/flydra
09b86859b1863700cdea0bbcdd4758da6c83930b
[ "Apache-2.0", "MIT" ]
45
2017-08-25T06:46:56.000Z
2021-08-29T16:42:49.000Z
flydra_analysis/flydra_analysis/a2/check_mainbrain_h5_contiguity.py
elhananby/flydra
09b86859b1863700cdea0bbcdd4758da6c83930b
[ "Apache-2.0", "MIT" ]
7
2017-10-16T10:46:20.000Z
2020-12-03T16:42:55.000Z
flydra_analysis/flydra_analysis/a2/check_mainbrain_h5_contiguity.py
elhananby/flydra
09b86859b1863700cdea0bbcdd4758da6c83930b
[ "Apache-2.0", "MIT" ]
21
2018-04-11T09:06:40.000Z
2021-12-26T23:38:40.000Z
#!/usr/bin/env python
from __future__ import print_function

import tables
import argparse
import numpy as np
import sys


def check_mainbrain_h5_contiguity(
    filename, slow_but_less_ram=False, shortcircuit=False, verbose=False
):
    failed_obj_ids = []
    if verbose:
        print("opening %r" % filename)
    with tables.open_file(filename, mode="r") as f:
        table = f.root.kalman_estimates
        all_obj_ids = table.cols.obj_id[:]
        obj_ids = np.unique(all_obj_ids)
        if verbose:
            print("checking %d obj_ids" % len(obj_ids))

        if not slow_but_less_ram:
            # faster but more RAM
            all_frames = table.cols.frame[:]
            for obj_id in obj_ids:
                frame = all_frames[all_obj_ids == obj_id]
                diff = frame[1:] - frame[:-1]
                if np.any(diff != 1):
                    failed_obj_ids.append(obj_id)
                    if verbose:
                        print("failed: %d" % obj_id)
                    if shortcircuit:
                        return failed_obj_ids
        else:
            # slower but more memory efficient
            for obj_id in obj_ids:
                cond = all_obj_ids == obj_id
                idxs = np.nonzero(cond)[0]
                frame = table.read_coordinates(idxs, field="frame")
                diff = frame[1:] - frame[:-1]
                if np.any(diff != 1):
                    failed_obj_ids.append(obj_id)
                    if verbose:
                        print("failed: %d" % obj_id)
                    if shortcircuit:
                        return failed_obj_ids
    return failed_obj_ids


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("file", type=str, default=None, help="file to check")
    parser.add_argument(
        "--verbose", action="store_true", default=False, help="print stuff"
    )
    parser.add_argument(
        "--findall",
        action="store_true",
        default=False,
        help="continue after first hit (only makes sense with verbose or output-log)",
    )
    parser.add_argument(
        "--slow-but-less-ram",
        action="store_true",
        default=False,
        help="use the slower, more memory-efficient code path",
    )
    parser.add_argument(
        "--no-output-log",
        action="store_true",
        default=False,
        help="do not print a final summary",
    )
    options = parser.parse_args()

    failed_obj_ids = check_mainbrain_h5_contiguity(
        filename=options.file,
        slow_but_less_ram=options.slow_but_less_ram,
        shortcircuit=not options.findall,
        verbose=options.verbose,
    )
    if len(failed_obj_ids):
        if not options.no_output_log:
            print("%s some objects failed: %r" % (options.file, failed_obj_ids))
        sys.exit(1)
    else:
        if not options.no_output_log:
            print("%s no objects failed" % options.file)
        sys.exit(0)


def cls(root="/mnt/strawscience/data/auto_pipeline/raw_archive/by_date"):
    """Generates example command lines amenable to use, for example, with GNU parallel."""
    from itertools import product
    import os.path as op

    for year, month in product(
        (2015, 2014, 2013, 2012), ["%02d" % d for d in range(1, 13)]
    ):
        print(
            "find %s -iname '*.mainbrain.h5' "
            "-exec flydra_analysis_check_mainbrain_h5_contiguity --findall {} \\; "
            "&>~/%d-%s.log" % (op.join(root, str(year), month), year, month)
        )


if __name__ == "__main__":
    main()
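The heart of the check is the frame-difference test: a trajectory is contiguous exactly when consecutive frame numbers differ by 1. A tiny standalone illustration of the same idea (array values are illustrative only):

    import numpy as np

    frames = np.array([100, 101, 102, 104])  # a gap between 102 and 104
    has_gap = np.any(np.diff(frames) != 1)   # np.diff == frame[1:] - frame[:-1]
    assert has_gap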
33.066038
90
0.575178
437
3,505
4.389016
0.343249
0.056309
0.056309
0.036496
0.324296
0.253389
0.20438
0.20438
0.17414
0.17414
0
0.014178
0.315835
3,505
105
91
33.380952
0.785655
0.043937
0
0.366667
0
0
0.152558
0.030212
0
0
0
0
0
1
0.033333
false
0
0.077778
0
0.144444
0.122222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd2629883944c343ab1a2e4d82cafb22e7d45e13
2,304
py
Python
reader.py
Birdulon/html-mangareader
dbdbbaa454125896b9de2d918f2ab59a3c06adc2
[ "MIT" ]
1
2021-05-08T14:58:17.000Z
2021-05-08T14:58:17.000Z
reader.py
Birdulon/html-mangareader
dbdbbaa454125896b9de2d918f2ab59a3c06adc2
[ "MIT" ]
null
null
null
reader.py
Birdulon/html-mangareader
dbdbbaa454125896b9de2d918f2ab59a3c06adc2
[ "MIT" ]
null
null
null
import sys
import traceback
import webbrowser
from argparse import ArgumentParser, Namespace
from os import path
from tkinter import Tk, messagebox, filedialog

from mangareader.mangarender import extract_render
from mangareader import templates


def parse_args() -> Namespace:
    parser = ArgumentParser(description='Mangareader')
    parser.add_argument('path', nargs='?', help='Path to image, folder, or comic book archive')
    parser.add_argument('--no-browser', action='store_true')
    return parser.parse_args()


def main() -> None:
    args = parse_args()
    if not args.path:
        imagetypes = ';'.join(f'*.{ext}' for ext in templates.DEFAULT_IMAGETYPES)
        archivetypes = ';'.join(
            f'*.{ext}' for ext in (*templates.ZIP_TYPES, *templates.RAR_TYPES, *templates._7Z_TYPES)
        )
        filetypes = (
            ('Supported files', ';'.join((imagetypes, archivetypes))),
            ('Images', imagetypes),
            ('Comic book archive', archivetypes),
            ('All files', '*'),
        )
        target_path = filedialog.askopenfilename(
            filetypes=filetypes,
            title='Open Image - Mangareader',
        )
        if not target_path:
            return
    else:
        target_path = args.path
    working_dir = getattr(sys, '_MEIPASS', path.abspath(path.dirname(__file__)))
    lib_dir = f'{working_dir}/mangareader'
    with open(f'{working_dir}/version', encoding='utf-8') as version_file:
        version = version_file.read().strip()
    try:
        boot_path = extract_render(
            path=target_path,
            version=version,
            doc_template_path=f'{lib_dir}/doc.template.html',
            page_template_path=f'{lib_dir}/img.template.html',
            boot_template_path=f'{lib_dir}/boot.template.html',
            asset_paths=(f'{lib_dir}/{asset}' for asset in templates.ASSETS),
            img_types=templates.DEFAULT_IMAGETYPES,
        )
        if args.no_browser:
            print(boot_path)
        else:
            webbrowser.open(boot_path.as_uri())
    except Exception as e:
        Tk().withdraw()
        messagebox.showerror(
            'Mangareader encountered an error: ' + type(e).__name__,
            ''.join(traceback.format_exc())
        )


if __name__ == '__main__':
    main()
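Command-line usage implied by the argument parser above (the archive filename is hypothetical; the script name comes from the record's path):

    python reader.py my_series_vol1.cbz               # render and open in the default browser
    python reader.py my_series_vol1.cbz --no-browser  # just print the rendered boot page path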
34.909091
100
0.631076
262
2,304
5.324427
0.416031
0.021505
0.020072
0.034409
0.076703
0.035842
0.035842
0
0
0
0
0.001154
0.24783
2,304
65
101
35.446154
0.803808
0
0
0.033898
0
0
0.161458
0.055556
0
0
0
0
0
1
0.033898
false
0.016949
0.152542
0
0.220339
0.016949
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd29d7f8357ca28a05195118a23e7f338eea17aa
483
py
Python
Qemu/power_on_qemu.py
I-Rinka/Virtualization-Difference
7727215f5b5cdb8bf18d91ef76685ccd3489e760
[ "MIT" ]
null
null
null
Qemu/power_on_qemu.py
I-Rinka/Virtualization-Difference
7727215f5b5cdb8bf18d91ef76685ccd3489e760
[ "MIT" ]
null
null
null
Qemu/power_on_qemu.py
I-Rinka/Virtualization-Difference
7727215f5b5cdb8bf18d91ef76685ccd3489e760
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import os


def power_on():
    os.system("sudo bash ./start_vm.sh")


if __name__ == "__main__":
    n = os.fork()
    if n > 0:
        # parent: give the VM a moment to create the tap device, then configure it
        os.system("sleep 2")
        os.system("sudo ip addr add 172.19.0.1/24 dev tap1")
        os.system("sudo ip link set tap1 up")
        os.wait()
    else:
        # child: boot the VM
        # os.execl("./1_start_vm.sh","./1_start_vm.sh")
        power_on()
18.576923
64
0.52588
71
483
3.366197
0.577465
0.133891
0.150628
0.117155
0
0
0
0
0
0
0
0.049844
0.335404
483
25
65
19.32
0.694704
0.138716
0
0
0
0
0.243961
0
0
0
0
0
0
1
0.066667
false
0
0.266667
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd2a739ca5325c09ff24414f0ce30e0bab1eacb7
381
py
Python
tests/unit/python/execution_tree/dynamic_init.py
frzfrsfra4/phylanx
001fe7081f3a24e56157cdb21b2d126b8953ff5d
[ "BSL-1.0" ]
83
2017-08-27T15:09:13.000Z
2022-01-18T17:03:41.000Z
tests/unit/python/execution_tree/dynamic_init.py
frzfrsfra4/phylanx
001fe7081f3a24e56157cdb21b2d126b8953ff5d
[ "BSL-1.0" ]
808
2017-08-27T15:35:01.000Z
2021-12-14T17:30:50.000Z
tests/unit/python/execution_tree/dynamic_init.py
frzfrsfra4/phylanx
001fe7081f3a24e56157cdb21b2d126b8953ff5d
[ "BSL-1.0" ]
55
2017-08-27T15:09:22.000Z
2022-03-25T12:07:34.000Z
# Copyright (c) 2018 R. Tohid
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

from phylanx import Phylanx, PhylanxSession


@Phylanx
def foo():
    a = 2
    return a


def main():
    assert (2 == foo())


if __name__ == "__main__":
    PhylanxSession.init(1)
    main()
17.318182
79
0.671916
56
381
4.357143
0.678571
0.02459
0.07377
0.098361
0
0
0
0
0
0
0
0.043189
0.209974
381
21
80
18.142857
0.767442
0.461942
0
0
0
0
0.040201
0
0
0
0
0
0.1
1
0.2
false
0
0.1
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd2af34a041fa744101d9895d1374416d6964a87
1,073
py
Python
indexStackexchange.py
o19s/semantic-search-course
ebe15eaa65c5009fa2d526b4df72bf8dbfb8630f
[ "Apache-2.0" ]
6
2016-03-07T18:41:52.000Z
2016-12-22T20:45:17.000Z
indexStackexchange.py
o19s/semantic-search-course
ebe15eaa65c5009fa2d526b4df72bf8dbfb8630f
[ "Apache-2.0" ]
1
2016-03-07T19:09:19.000Z
2016-03-07T19:09:19.000Z
indexStackexchange.py
o19s/semantic-search-course
ebe15eaa65c5009fa2d526b4df72bf8dbfb8630f
[ "Apache-2.0" ]
null
null
null
import json

import requests
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk


def openPosts():
    data = ""
    try:
        f = open("scifi_stackexchange.json")
        data = f.read()
    except IOError:
        stackExchangeData = "https://storage.googleapis.com/quepid-sample-datasets/elasticsearch/scifi_stackexchange.json"
        resp = requests.get(stackExchangeData)
        print("GET %s Len %s" % (resp.status_code, len(resp.text)))
        f = open("scifi_stackexchange.json", "w")
        f.write(resp.text)
        data = resp.text
    f.close()
    return json.loads(data)


posts = openPosts()


def bulkAdds(posts, index='stackexchange'):
    print("Indexing %s Posts" % len(posts))
    for post in posts:
        print("indexing %s" % post['Id'])
        yield {
            "_id": post['Id'],
            "_index": index,
            '_type': 'post',
            '_op_type': 'index',
            '_source': post
        }


es = Elasticsearch("http://localhost:9200")
bulk(es, bulkAdds(posts))
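After the bulk load finishes, a quick sanity check with the Elasticsearch count API — a sketch against the same local cluster:

    res = es.count(index='stackexchange')
    print('documents indexed:', res['count'])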
26.825
121
0.587139
117
1,073
5.299145
0.470085
0.087097
0.106452
0.074194
0.087097
0
0
0
0
0
0
0.005168
0.278658
1,073
39
122
27.512821
0.795866
0
0
0
0
0
0.240447
0.044734
0
0
0
0
0
1
0.0625
false
0
0.125
0
0.21875
0.09375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd2c89f3c83b146173c4e02b15272145ff176687
1,634
py
Python
Lab01_Introduction/exercise-4.py
rodrigoc-silva/Python-course
327b20738a4b383510faddc0ec26a54be1bbd717
[ "MIT" ]
null
null
null
Lab01_Introduction/exercise-4.py
rodrigoc-silva/Python-course
327b20738a4b383510faddc0ec26a54be1bbd717
[ "MIT" ]
null
null
null
Lab01_Introduction/exercise-4.py
rodrigoc-silva/Python-course
327b20738a4b383510faddc0ec26a54be1bbd717
[ "MIT" ]
null
null
null
# This program shows the amount of each ingredient needed for a number of cookies.

# constants
sugar = 1.5
butter = 1
flour = 2.75
cookies = 48

# input
numOfCookies = int(input('Enter the number of cookies:'))

# calculation
amtSugar = sugar / cookies * numOfCookies
amtButter = butter / cookies * numOfCookies
amtFlour = flour / cookies * numOfCookies

# output
print('To make', numOfCookies, 'cookies, you will need:')
print(format(amtSugar, ',.2f'), 'cups of sugar.')
print(format(amtButter, ',.2f'), 'cups of butter.')
print(format(amtFlour, ',.2f'), 'cups of flour.')

# ask user to quit program
input("\n\nPress any key to quit...")

## Output with 5 test cases
##
## Test Case 1.
##
# Enter the number of cookies:56
# To make 56 cookies, you will need:
# 1.75 cups of sugar.
# 1.17 cups of butter.
# 3.21 cups of flour.
##
##
# Press any key to quit...
##
## Test Case 2.
##
# Enter the number of cookies:96
# To make 96 cookies, you will need:
# 3.00 cups of sugar.
# 2.00 cups of butter.
# 5.50 cups of flour.
##
##
# Press any key to quit...
##
## Test Case 3.
##
# Enter the number of cookies:480
# To make 480 cookies, you will need:
# 15.00 cups of sugar.
# 10.00 cups of butter.
# 27.50 cups of flour.
##
##
# Press any key to quit...
##
## Test Case 4.
##
# Enter the number of cookies:200
# To make 200 cookies, you will need:
# 6.25 cups of sugar.
# 4.17 cups of butter.
# 11.46 cups of flour.
##
##
# Press any key to quit...
##
## Test Case 5.
##
# Enter the number of cookies:2
# To make 2 cookies, you will need:
# 0.06 cups of sugar.
# 0.04 cups of butter.
# 0.11 cups of flour.
##
##
# Press any key to quit...
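The scaling is simple unit-rate arithmetic: each amount is (ingredient per batch of 48) * (requested cookies / 48). Test Case 2 checks out by hand: 1.5 / 48 * 96 = 3.00 cups of sugar and 2.75 / 48 * 96 = 5.50 cups of flour.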
18.155556
82
0.660343
274
1,634
3.937956
0.266423
0.100093
0.07785
0.088971
0.29101
0.163114
0.163114
0.163114
0.137164
0.137164
0
0.066971
0.195838
1,634
89
83
18.359551
0.754186
0.603427
0
0
0
0
0.255435
0
0
0
0
0
0
1
0
false
0
0
0
0
0.307692
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd2ee870e5845b50e43bca14345288b03bd674b2
1,340
py
Python
zombie_infection.py
schana/random-hacking
5eeda2f05681ce9f56f1b9114255c2392e92ee9a
[ "Apache-2.0" ]
null
null
null
zombie_infection.py
schana/random-hacking
5eeda2f05681ce9f56f1b9114255c2392e92ee9a
[ "Apache-2.0" ]
null
null
null
zombie_infection.py
schana/random-hacking
5eeda2f05681ce9f56f1b9114255c2392e92ee9a
[ "Apache-2.0" ]
null
null
null
import random
import sys

sys.setrecursionlimit(15000)

count_columns = 50
count_rows = 40

# the dense random grid below is immediately overwritten by the sparse
# version: an all-zero grid seeded with 10 random infections
matrix = [[random.randint(0, 1) for i in range(count_columns)] for j in range(count_rows)]
matrix = [[0] * count_columns for _ in range(count_rows)]
for _ in range(10):
    matrix[random.randint(0, count_rows - 1)][random.randint(0, count_columns - 1)] = 1

visited = [[False] * len(row) for row in matrix]


def print_matrix():
    for row in matrix:
        for value in row:
            print(value if value else ' ', end=' ')
        print()


# can use stack if recursion depth is too much - just push items on to be spread
# and iterate in a loop
def spread(r, c):
    if r < 0 or r >= count_rows or c < 0 or c >= count_columns:
        return
    if matrix[r][c] == 1 and not visited[r][c]:
        visited[r][c] = True
        spread(r, c + 1)
        spread(r, c - 1)
        spread(r + 1, c)
        spread(r - 1, c)
    else:
        matrix[r][c] = 1
        visited[r][c] = True


time = 0
while not all(all(row) for row in matrix):
    print_matrix()
    print()
    time += 1
    visited = [[False] * len(row) for row in matrix]
    for r, row in enumerate(matrix):
        for c, value in enumerate(row):
            if not visited[r][c] and value == 1:
                spread(r, c)
                visited[r][c] = True

print_matrix()
print(time)
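Following the comment in the code itself, the recursion can be replaced with an explicit stack so the raised recursion limit is unnecessary; a sketch of that variant (name hypothetical), with the same per-cell semantics:

    def spread_iterative(r0, c0):
        stack = [(r0, c0)]
        while stack:
            r, c = stack.pop()
            if r < 0 or r >= count_rows or c < 0 or c >= count_columns:
                continue
            if matrix[r][c] == 1 and not visited[r][c]:
                visited[r][c] = True
                stack.extend([(r, c + 1), (r, c - 1), (r + 1, c), (r - 1, c)])
            else:
                matrix[r][c] = 1
                visited[r][c] = True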
24.363636
90
0.590299
217
1,340
3.576037
0.271889
0.028351
0.05799
0.072165
0.203608
0.155928
0.085052
0.085052
0.085052
0
0
0.031185
0.28209
1,340
54
91
24.814815
0.775468
0.074627
0
0.225
0
0
0.001617
0
0
0
0
0
0
1
0.05
false
0
0.05
0
0.125
0.175
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd347bef874fe2b7fd02a07a979e78547511f381
216
py
Python
src/Main.py
Yee172/Memory_Revival
e9bf4598564546ada3b9d9bfce7bf35fad348850
[ "MIT" ]
null
null
null
src/Main.py
Yee172/Memory_Revival
e9bf4598564546ada3b9d9bfce7bf35fad348850
[ "MIT" ]
null
null
null
src/Main.py
Yee172/Memory_Revival
e9bf4598564546ada3b9d9bfce7bf35fad348850
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'Yee_172'
__date__ = '2017/12/03'

import sys

# strip the trailing '/src' from the script directory so the project root is importable
PATH = sys.path[0][:-4]
sys.path.append(PATH)
from src.Func import *

win = MainWin()
sys.exit(app.exec_())
14.4
23
0.648148
35
216
3.714286
0.8
0.161538
0
0
0
0
0
0
0
0
0
0.081081
0.143519
216
14
24
15.428571
0.621622
0.199074
0
0
0
0
0.099415
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd34863190099e5a1deaa0f914751c6c45b7892c
1,191
py
Python
tools/protonvpn-ips/main.py
alessandrobasi/basi-warninglist
995d3cd94e1dc7afdc09eff11bc1baa352b225e9
[ "MIT" ]
null
null
null
tools/protonvpn-ips/main.py
alessandrobasi/basi-warninglist
995d3cd94e1dc7afdc09eff11bc1baa352b225e9
[ "MIT" ]
null
null
null
tools/protonvpn-ips/main.py
alessandrobasi/basi-warninglist
995d3cd94e1dc7afdc09eff11bc1baa352b225e9
[ "MIT" ]
null
null
null
import os

import requests

dir_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
save_path = "../../lists/" + dir_name + "/"


def main():
    ips = set()
    with open(save_path + "all.txt", "r", encoding="UTF-8") as f:
        for line in f:
            ips.add(line.rstrip("\n"))  # strip the trailing newline, if present

    url_ = 'https://api.protonmail.ch/vpn/logicals'
    headers = {'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36'}
    r = requests.get(url=url_, headers=headers)
    json_request = r.json()

    for obj in json_request["LogicalServers"]:
        for server in obj["Servers"]:
            ips.add(server["EntryIP"])
            ips.add(server["ExitIP"])

    with open(save_path + "ipv4CIDR.txt", "w", encoding="UTF-8") as ipv4F, \
            open(save_path + "ipv6CIDR.txt", "w", encoding="UTF-8") as ipv6F, \
            open(save_path + "all.txt", "w", encoding="UTF-8") as allF:
        for ip in ips:
            allF.write(ip + "\n")
            if '.' in ip:
                ipv4F.write(ip + "\n")
            else:
                ipv6F.write(ip + "\n")

    return str(len(ips))


if __name__ == "__main__":
    print("ProtonVPN ips")
    main()
31.342105
191
0.577666
173
1,191
3.83815
0.491329
0.060241
0.072289
0.084337
0.131024
0.081325
0
0
0
0
0
0.049724
0.240134
1,191
38
192
31.342105
0.683978
0
0
0
0
0.037037
0.25
0
0
0
0
0
0
1
0.037037
false
0
0.037037
0
0.111111
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd3567ec2bb0a247f32f1485e666f3eac6f7dc19
2,809
py
Python
dakota/sobol/sobol.py
arfc/dcwrapper
82226f601580be464668fa63df64f037962db57e
[ "BSD-3-Clause" ]
1
2020-03-26T14:09:30.000Z
2020-03-26T14:09:30.000Z
dakota/sobol/sobol.py
mehmeturkmen/dcwrapper
82226f601580be464668fa63df64f037962db57e
[ "BSD-3-Clause" ]
10
2019-10-08T18:46:36.000Z
2019-11-14T19:23:05.000Z
dakota/sobol/sobol.py
mehmeturkmen/dcwrapper
82226f601580be464668fa63df64f037962db57e
[ "BSD-3-Clause" ]
3
2019-10-29T19:23:44.000Z
2020-09-18T13:09:49.000Z
# Dakota Python Driving Script

# necessary python modules
import dakota.interfacing as di
import subprocess
import sys
import os
import multiprocessing

sys.path.append('../../scripts')
import input as inp
import output as oup
import external_cym

cycdir = '../../cyclus-files/sobol/'

# ----------------------------
# Parse Dakota parameters file
# ----------------------------
params, results = di.read_parameters_file()

# -------------------------------
# Convert and send to Cyclus
# -------------------------------
# Edit Cyclus input file
cyclus_template = cycdir + 'sobol.xml.in'
scenario_name = 'fs' + str(int(params['fs'])) + 'ty' + \
    str(int(params['ty'])) + 'ct' + str(int(params['ct']))
variable_dict = {'fleet_share_mox': int((params['fs'])),
                 'fleet_share_fr': int((100 - params['fs'])),
                 'transition_year': int((params['ty'])),
                 'cooling_time': int((params['ct'] * 12))}
output_xml = cycdir + 'sobol.xml'
inp.render_input(cyclus_template, variable_dict, output_xml)

# Run Cyclus with edited input file
output_sqlite = cycdir + scenario_name + '.sqlite'
os.system('cyclus -i ' + output_xml + ' -o ' + output_sqlite)

# ----------------------------
# Return the results to Dakota
# ----------------------------
f = open('output_name.txt', 'w+')
f.write(output_sqlite)
f.close()

# spawn each metric-extraction worker, then busy-wait for its output file
p = multiprocessing.Process(target=external_cym.hlw)
p.start()
fresh = False
while fresh is False:
    if os.path.exists('hlw.txt'):
        if os.stat('hlw.txt').st_size > 0:
            fresh = True
p.terminate()
f = open('hlw.txt', 'r')
if f.mode == 'r':
    hlw = f.read()
f.close()

q = multiprocessing.Process(target=external_cym.dep_u)
q.start()
fresh = False
while fresh is False:
    if os.path.exists('depu.txt'):
        if os.stat('depu.txt').st_size > 0:
            fresh = True
q.terminate()  # terminate the depleted-uranium worker (not the already-finished hlw one)
f = open('depu.txt', 'r')
if f.mode == 'r':
    depleted_u = f.read()
f.close()

p = multiprocessing.Process(target=external_cym.idlecapp)
p.start()
fresh = False
while fresh is False:
    if os.path.exists('idlecap.txt'):
        if os.stat('idlecap.txt').st_size > 0:
            fresh = True
p.terminate()
f = open('idlecap.txt', 'r')
if f.mode == 'r':
    idlecap = f.read()
f.close()

for i, r in enumerate(results.responses()):
    if r.asv.function:
        if i == 0:
            r.function = hlw
        if i == 1:
            r.function = depleted_u
        if i == 2:
            r.function = idlecap

if os.path.exists('depu.txt'):
    os.remove('depu.txt')
if os.path.exists('hlw.txt'):
    os.remove('hlw.txt')
if os.path.exists('idlecap.txt'):
    os.remove('idlecap.txt')

results.write()
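The script repeats the same wait-for-file busy loop three times; a small helper, sketched here with a hypothetical name, would factor it out and avoid spinning the CPU while waiting:

    import os
    import time

    def wait_for_output(path, poll_seconds=0.1):
        # block until `path` exists and is non-empty
        while not (os.path.exists(path) and os.stat(path).st_size > 0):
            time.sleep(poll_seconds)

    # e.g. wait_for_output('hlw.txt') before reading the hlw result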
25.770642
62
0.555714
364
2,809
4.200549
0.302198
0.023545
0.031393
0.054938
0.328973
0.299542
0.218443
0.218443
0.158273
0.158273
0
0.005119
0.234959
2,809
108
63
26.009259
0.706375
0.133499
0
0.355263
0
0
0.133651
0.010813
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd3a8db83a92cdd76c21b817a1af0e0151e6c4ab
5,690
py
Python
app/hide-and-seek/common/computils/debug.py
loramf/mlforhealthlabpub
aa5a42a4814cf69c8223f27c21324ee39d43c404
[ "BSD-3-Clause" ]
171
2021-02-12T10:23:19.000Z
2022-03-29T01:58:52.000Z
app/hide-and-seek/common/computils/debug.py
loramf/mlforhealthlabpub
aa5a42a4814cf69c8223f27c21324ee39d43c404
[ "BSD-3-Clause" ]
4
2021-06-01T08:18:33.000Z
2022-02-20T13:37:30.000Z
app/hide-and-seek/common/computils/debug.py
loramf/mlforhealthlabpub
aa5a42a4814cf69c8223f27c21324ee39d43c404
[ "BSD-3-Clause" ]
93
2021-02-10T03:21:59.000Z
2022-03-30T19:10:37.000Z
""" Debug helpers. """ import io import logging from typing import Union, Optional, Callable import numpy as np import pandas as pd _printt_log_method = print def set_log_method(log_method: Optional[Callable] = None) -> None: global _printt_log_method # pylint: disable=global-statement if log_method is not None: _printt_log_method = log_method else: _printt_log_method = print def _init_str(minimal: bool = False) -> str: if minimal: return "" global _printt_log_method # pylint: disable=global-statement return "\n" if _printt_log_method == print else "\n\n" # pylint: disable=comparison-with-callable force_minimal_logging = False def ar( array: np.ndarray, name: Optional[str] = None, lim: Union[int, str, None] = None, lw: int = 200, minimal: bool = False, ) -> None: """Debug `ar`ray. Print helper for `numpy.ndarray`, will print like so: ``` my_array [<class 'numpy.ndarray'>] [dtype=float32]: SHAPE: (3, 3) [[ 0.5372, 1.2580, -0.9479], [-0.7958, -1.6064, -1.2641], [ 1.6119, 1.3587, -0.1000]]) ``` The `linewith` printoption will be set to `200` by default (`lw` argument) to allow for fewer line breaks. Args: array (np.ndarray): array to print. name (Optional[str], optional): The name for the array to print. Defaults to None. lim (Optional[int, str], optional): If `int`, will set `edgeitems` printoption to this value. If set to `"full"` will print the entire array (can be slow). Defaults to None. lw (int, optional): Set the `linewith` printoption to this. Defaults to 200. minimal (bool, optional): If true, will not print the array itself. Defaults to False. """ global _printt_log_method # pylint: disable=global-statement if force_minimal_logging: minimal = True if name is None: name = f"Array-{id(array)}" content = _init_str(minimal) if not minimal: content += f"=== <{name}> ===:\n[{type(array)}] [dtype={array.dtype}]\n" content += f"SHAPE: {tuple(array.shape)}\n" with np.printoptions( threshold=np.product(array.shape) if lim == "full" else 1000, # 1000 is default. edgeitems=lim if isinstance(lim, int) else 3, # 3 is default. linewidth=lw, ): content += str(array) content += "\n" # Leave one blank line after printing. else: content += f"<{name}>:: {array.shape}" _printt_log_method(content) def ar_(*args, **kwargs): """ Shortcut for `ar(..., minimal=True)`. """ ar(*args, **kwargs, minimal=True) def setup_logger( name: str, level: int = logging.INFO, format_str: str = "%(name)s:%(levelname)s:\t%(message)s" ) -> logging.Logger: """Set up a console logger with name `name`. Args: name (str): Logger name. level (int): Logging level to set. Defaults to logging.INFO. format_str (str, optional): The format string to use for the logger formatter. Defaults to "%(name)s:%(levelname)s:\t%(message)s". Returns: logging.Logger: [description] """ _logger = logging.getLogger(name) handler = logging.StreamHandler() handler.setLevel(logging.DEBUG) formatter = logging.Formatter(fmt=format_str) handler.setFormatter(formatter) _logger.addHandler(handler) _logger.setLevel(level) return _logger def _df_info_to_str(dataframe: pd.DataFrame) -> str: buf = io.StringIO() dataframe.info(buf=buf) return buf.getvalue() def df( dataframe: Union[pd.DataFrame, pd.Series], name: Optional[str] = None, info: bool = False, max_rows_before_collapse: Optional[Union[int, str]] = None, keep_rows_if_collapsed: Optional[int] = None, force_show_all_cols: bool = False, minimal: bool = False, ) -> None: """Debug `d`ata`f`rame. Print helper for `pd.DataFrame`. 
""" global _printt_log_method # pylint: disable=global-statement if force_minimal_logging: minimal = True if name is None: name = f"DataFrame-{id(dataframe)}" if isinstance(dataframe, pd.DataFrame): tp = "<class 'pd.DataFrame'>" elif isinstance(dataframe, pd.Series): tp = "<class 'pd.Series'>" else: raise ValueError(f"`df` must be a pandas DataFrame or Series, was {type(dataframe)}.") content = _init_str(minimal) if not minimal: content += f"=== <{name}> ===:\n[{tp}]\n\n" pd_option_seq = [] if max_rows_before_collapse is not None: if max_rows_before_collapse == "full": max_rows_before_collapse = dataframe.shape[0] pd_option_seq.extend(["display.max_rows", max_rows_before_collapse]) if keep_rows_if_collapsed is not None: pd_option_seq.extend(["display.min_rows", keep_rows_if_collapsed]) if force_show_all_cols: pd_option_seq.extend(["display.max_columns", dataframe.shape[1]]) pd_option_seq.extend(["display.expand_frame_repr", True]) def _build(c): if info: c += _df_info_to_str(dataframe) + "\n" c += str(dataframe) + "\n" return c if len(pd_option_seq) > 0: with pd.option_context(*pd_option_seq): content = _build(content) else: content = _build(content) else: content += f"<{name}>:: {tp}:: {dataframe.shape}" _printt_log_method(content) def df_(*args, **kwargs): """ Shortcut for `df(..., minimal=True)`. """ df(*args, **kwargs, minimal=True)
30.10582
110
0.618102
741
5,690
4.585695
0.233468
0.037081
0.044144
0.030901
0.258682
0.16186
0.128311
0.114185
0.099765
0.084756
0
0.016675
0.25167
5,690
188
111
30.265957
0.781353
0.273814
0
0.284404
0
0
0.114696
0.038316
0
0
0
0
0
1
0.082569
false
0
0.045872
0
0.174312
0.100917
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd3b0f2c14b30cd87e31089661c02ceeb62af81c
3,862
py
Python
setup.py
jacklinke/django-directed
8ef8cd8a71e9a03a8628dce6465351f676f542ff
[ "Apache-2.0" ]
2
2022-02-09T10:15:40.000Z
2022-02-22T14:11:03.000Z
setup.py
jacklinke/django-directed
8ef8cd8a71e9a03a8628dce6465351f676f542ff
[ "Apache-2.0" ]
1
2022-02-20T14:49:37.000Z
2022-02-20T14:49:37.000Z
setup.py
jacklinke/django-directed
8ef8cd8a71e9a03a8628dce6465351f676f542ff
[ "Apache-2.0" ]
null
null
null
import os
import re
import sys
from collections import defaultdict

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def get_version(*file_paths):
    """Retrieves the version from django_directed/__init__.py"""
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    version_file = open(filename).read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")


def get_extras_require(path, add_all=True):
    # https://hanxiao.io/2019/11/07/A-Better-Practice-for-Managing-extras-require-Dependencies-in-Python/
    with open(path) as fp:
        extra_deps = defaultdict(set)
        for k in fp:
            if k.strip() and not k.startswith("#"):
                tags = set()
                if ":" in k:
                    k, v = k.split(":")
                    tags.update(vv.strip() for vv in v.split(","))
                tags.add(re.split("[<=>]", k)[0])
                for t in tags:
                    extra_deps[t].add(k)

        # add tag `all` at the end
        if add_all:
            extra_deps["all"] = set(vv for v in extra_deps.values() for vv in v)
    return extra_deps


readme = open("README.md").read()
changelog = open("CHANGELOG.md").read()
requirements = open("requirements/base.txt").readlines()
extras_requirements_path = "requirements/extras.txt"
version = get_version("django_directed", "__init__.py")

if sys.argv[-1] == "publish":
    try:
        import wheel

        print("Wheel version: ", wheel.__version__)
    except ImportError:
        print('Wheel library missing. Please run "pip install wheel"')
        sys.exit()
    os.system("python setup.py sdist upload")
    os.system("python setup.py bdist_wheel upload")
    sys.exit()

if sys.argv[-1] == "tag":
    print("Tagging the version on git:")
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()

classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Operating System :: OS Independent",
    "Topic :: Software Development :: Libraries",
    "Topic :: Database",
    "Topic :: Utilities",
    "Environment :: Web Environment",
    "Framework :: Django",
    "Framework :: Django :: 2.2",
    "Framework :: Django :: 3.0",
    "Framework :: Django :: 3.1",
    "Framework :: Django :: 3.2",
]

setup(
    name="django-directed",
    version=version,
    description="""Tools for building, querying, manipulating, and exporting directed graphs with django""",
    long_description=readme + "\n\n" + changelog,
    long_description_content_type="text/markdown",
    author="Jack Linke",
    author_email="jack@watervize.com",
    license="Apache Software License",
    url="https://github.com/jacklinke/django-directed/",
    project_urls={
        "Documentation": "https://django-directed.readthedocs.io/en/latest/",
        "Source": "https://github.com/jacklinke/django-directed/",
        "Tracker": "https://github.com/jacklinke/django-directed/issues",
    },
    packages=[
        "django_directed",
    ],
    package_dir={"django_directed": "django_directed"},
    include_package_data=True,
    keywords="django-directed, graph, tree, dag, network, directed, acyclic, postgres, cte",
    python_requires=">=3.7, <4",
    classifiers=classifiers,
    install_requires=requirements,
    extras_require=get_extras_require(extras_requirements_path),
)
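For context on get_extras_require: the extras file it parses follows the `package: tag1, tag2` convention from the linked blog post. A hypothetical requirements/extras.txt and the mapping it would produce:

    # requirements/extras.txt (hypothetical contents)
    #   psycopg2>=2.8: postgres
    #   networkx: graphs, export
    #
    # get_extras_require(...) would then map:
    #   'postgres' -> {'psycopg2>=2.8'}, 'graphs' and 'export' -> {'networkx'},
    #   plus per-package tags ('psycopg2', 'networkx') and an aggregate 'all' tag.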
33.877193
108
0.634645
466
3,862
5.126609
0.405579
0.064462
0.052323
0.043533
0.064044
0.046463
0
0
0
0
0
0.010905
0.216468
3,862
113
109
34.176991
0.778586
0.046608
0
0.072917
0
0
0.391293
0.011973
0
0
0
0
0
1
0.020833
false
0
0.09375
0
0.135417
0.03125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd3b1d81b7abc114bb78bcdb8316981a6a5efeb1
2,050
py
Python
cv_utils/object_detection/dataset/utils.py
fadamsyah/cv_utils
487fc65fe4a71f05dd03df31cde21d866968c0b4
[ "MIT" ]
null
null
null
cv_utils/object_detection/dataset/utils.py
fadamsyah/cv_utils
487fc65fe4a71f05dd03df31cde21d866968c0b4
[ "MIT" ]
1
2021-11-01T06:10:29.000Z
2021-11-09T12:47:48.000Z
cv_utils/object_detection/dataset/utils.py
fadamsyah/cv_utils
487fc65fe4a71f05dd03df31cde21d866968c0b4
[ "MIT" ]
null
null
null
import json
import os
import shutil
from copy import deepcopy
from pathlib import Path


def create_and_overwrite_dir(path_dir):
    # Create the directory
    Path(path_dir).mkdir(parents=True, exist_ok=True)

    # Overwrite the directory
    for path in os.listdir(path_dir):
        try:
            os.remove(os.path.join(path_dir, path))
        except IsADirectoryError:
            shutil.rmtree(os.path.join(path_dir, path))


def read_json(path):
    """Read a .json file

    Args:
        path (string): Path of a .json file

    Returns:
        data (dictionary): Output dictionary
    """
    f = open(path)
    data = json.load(f)
    f.close()
    return data


def write_json(files, path, indent=4):
    """Write a json file from a dictionary

    Args:
        files (dictionary): Data
        path (string): Saved json path
        indent (int, optional): Number of spaces of indentation. Defaults to 4.
    """
    json_object = json.dumps(files, indent=indent)

    # Writing to saved_path_json
    with open(path, "w") as outfile:
        outfile.write(json_object)


def coco_to_img2annots(coco_annotations):
    # Initialize img2annots
    img2annots = {}

    # Generate img2annots key
    num_obj_init = {category['id']: 0 for category in coco_annotations['categories']}
    for image in coco_annotations['images']:
        image_id = image['id']
        img2annots[image_id] = {
            'description': deepcopy(image),
            'annotations': [],
            'num_objects': deepcopy(num_obj_init)
        }

    # Add every annotation to its corresponding image key
    for annotation in coco_annotations['annotations']:
        image_id = annotation['image_id']
        category_id = annotation['category_id']
        img2annots[image_id]['annotations'].append(annotation)
        img2annots[image_id]['num_objects'][category_id] += 1

    return img2annots


def yolo_to_img2annots(yolo_annotations, yolo_classes):
    pass
    # return img2annots
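A minimal sketch of coco_to_img2annots on a hypothetical two-line COCO dict, showing the image-indexed output shape:

    coco = {
        "categories": [{"id": 1, "name": "car"}],
        "images": [{"id": 10, "file_name": "img.jpg"}],
        "annotations": [{"id": 5, "image_id": 10, "category_id": 1, "bbox": [0, 0, 5, 5]}],
    }
    img2annots = coco_to_img2annots(coco)
    assert img2annots[10]["num_objects"][1] == 1
    assert len(img2annots[10]["annotations"]) == 1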
25.308642
85
0.632683
248
2,050
5.068548
0.366935
0.038982
0.02148
0.022275
0.033413
0.033413
0
0
0
0
0
0.009365
0.270732
2,050
81
86
25.308642
0.831438
0.238049
0
0
0
0
0.07095
0
0
0
0
0
0
1
0.135135
false
0.027027
0.135135
0
0.324324
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd41d6fca25f541134f0afce1961c06f85b0df9b
1,806
py
Python
tests/fixtures.py
DNXLabs/ssm-loader
eae0257794126247584150eeb1b74ae05f4fcaf5
[ "Apache-2.0" ]
null
null
null
tests/fixtures.py
DNXLabs/ssm-loader
eae0257794126247584150eeb1b74ae05f4fcaf5
[ "Apache-2.0" ]
2
2020-07-31T05:32:10.000Z
2020-09-07T10:38:24.000Z
tests/fixtures.py
DNXLabs/ssm-loader
eae0257794126247584150eeb1b74ae05f4fcaf5
[ "Apache-2.0" ]
null
null
null
import pytest
import os
import json
import boto3
from click.testing import CliRunner
from moto import mock_ssm


@pytest.fixture
def runner():
    return CliRunner()


@pytest.fixture(scope='function')
def aws_credentials():
    """Mocked AWS Credentials for moto."""
    os.environ['AWS_ACCESS_KEY_ID'] = 'test'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'test'
    os.environ['AWS_SECURITY_TOKEN'] = 'test'
    os.environ['AWS_SESSION_TOKEN'] = 'test'


@pytest.fixture(scope='function')
def ssm(aws_credentials):
    with mock_ssm():
        yield boto3.client('ssm', region_name='us-east-1')


@pytest.fixture
def ssm_put_parameter(ssm):
    ssm.put_parameter(
        Name='/app/env/ssm_string',
        Description='description',
        Value='PLACEHOLDER',
        Type='String'
    )
    ssm.put_parameter(
        Name='/app/env/ssm_secure_string',
        Description='description secure string',
        Value='PLACEHOLDER',
        Type='SecureString'
    )


@pytest.fixture
def ssm_empty_parameters():
    result = {
        "parameters": []
    }
    return json.dumps(result, indent=4, sort_keys=True, default=str) + '\n'


@pytest.fixture
def load_command_parameters_output():
    return '/app/env/ssm_string OK\n/app/env/ssm_secure_string OK\n'


@pytest.fixture
def ssm_parameters():
    result = {
        "parameters": [
            {
                "Name": "/app/env/ssm_string",
                "Type": "String",
                "Value": "PLACEHOLDER",
                "Version": 1
            },
            {
                "Name": "/app/env/ssm_secure_string",
                "Type": "SecureString",
                "Value": "PLACEHOLDER",
                "Version": 1
            }
        ]
    }
    return json.dumps(result, indent=4, sort_keys=True, default=str) + '\n'
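A hypothetical test showing how these fixtures compose (assuming they are exposed via conftest.py; the test name and assertion are illustrative, not part of the suite):

    def test_put_parameter_roundtrip(ssm, ssm_put_parameter):
        resp = ssm.get_parameter(Name='/app/env/ssm_string')
        assert resp['Parameter']['Value'] == 'PLACEHOLDER'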
22.860759
75
0.593577
203
1,806
5.103448
0.330049
0.087838
0.052124
0.050193
0.28861
0.188224
0.15251
0.098456
0.098456
0.098456
0
0.005303
0.269103
1,806
78
76
23.153846
0.779545
0.017719
0
0.311475
0
0
0.250141
0.058291
0
0
0
0
0
1
0.114754
false
0
0.098361
0.032787
0.278689
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd42f92ac6de47d16f3dec018fcdc491713b5ba6
5,656
py
Python
scripts/plotting/create_num_demos_plots.py
Learning-and-Intelligent-Systems/predicators
0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e
[ "MIT" ]
24
2021-11-20T16:35:41.000Z
2022-03-30T03:49:52.000Z
scripts/plotting/create_num_demos_plots.py
Learning-and-Intelligent-Systems/predicators
0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e
[ "MIT" ]
214
2021-10-12T01:17:50.000Z
2022-03-31T20:18:36.000Z
scripts/plotting/create_num_demos_plots.py
Learning-and-Intelligent-Systems/predicators
0b2e71cacf86ba2bfdc1d9059c3a78016d0a4d7e
[ "MIT" ]
1
2022-02-15T20:24:17.000Z
2022-02-15T20:24:17.000Z
"""Create plots for learning from varying numbers of demonstrations.""" import os import matplotlib import matplotlib.pyplot as plt import pandas as pd from predicators.scripts.analyze_results_directory import create_dataframes, \ get_df_for_entry pd.options.mode.chained_assignment = None # default='warn' # plt.rcParams["font.family"] = "CMU Serif" ############################ Change below here ################################ # Details about the plt figure. DPI = 500 FONT_SIZE = 18 # Groups over which to take mean/std. GROUPS = [ "ENV", "APPROACH", "EXCLUDED_PREDICATES", "EXPERIMENT_ID", "NUM_TRAIN_TASKS", "CYCLE" ] # All column names and keys to load into the pandas tables before plotting. COLUMN_NAMES_AND_KEYS = [ ("ENV", "env"), ("APPROACH", "approach"), ("EXCLUDED_PREDICATES", "excluded_predicates"), ("EXPERIMENT_ID", "experiment_id"), ("SEED", "seed"), ("NUM_TRAIN_TASKS", "num_train_tasks"), ("CYCLE", "cycle"), ("NUM_SOLVED", "num_solved"), ("AVG_NUM_PREDS", "avg_num_preds"), ("AVG_TEST_TIME", "avg_suc_time"), ("AVG_NODES_CREATED", "avg_num_nodes_created"), ("LEARNING_TIME", "learning_time"), ("PERC_SOLVED", "perc_solved"), ] DERIVED_KEYS = [("perc_solved", lambda r: 100 * r["num_solved"] / r["num_test_tasks"])] # The first element is the name of the metric that will be plotted on the # x axis. See COLUMN_NAMES_AND_KEYS for all available metrics. The second # element is used to label the x axis. X_KEY_AND_LABEL = [ ("NUM_TRAIN_TASKS", "Number of Training Tasks"), # ("LEARNING_TIME", "Learning time in seconds"), ] # Same as above, but for the y axis. Y_KEY_AND_LABEL = [ ("PERC_SOLVED", "% Evaluation Tasks Solved"), # ("AVG_NODES_CREATED", "Averaged nodes created"), ] # PLOT_GROUPS is a nested dict where each outer dict corresponds to one plot, # and each inner entry corresponds to one line on the plot. # The keys of the outer dict are plot titles. # The keys of the inner dict are (legend label, marker, df selector). 
PLOT_GROUPS = { "Learning from Few Demonstrations": [ ("PickPlace1D", "o", lambda df: df["EXPERIMENT_ID"].apply(lambda v: "cover_main_" in v)), ("Blocks", ".", lambda df: df["EXPERIMENT_ID"].apply(lambda v: "blocks_main_" in v)), ("Painting", "*", lambda df: df["EXPERIMENT_ID"].apply(lambda v: "painting_main_" in v) ), ("Tools", "s", lambda df: df["EXPERIMENT_ID"].apply(lambda v: "tools_main_" in v)), ], "GNN Shooting LfD": [ ("PickPlace1D", "o", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "cover_gnn_shooting_" in v)), ("Blocks", ".", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "blocks_gnn_shooting_" in v)), ("Painting", "*", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "painting_gnn_shooting_" in v)), ("Tools", "s", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "tools_gnn_shooting_" in v)), ], "GNN Model-Free LfD": [ ("PickPlace1D", "o", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "cover_gnn_modelfree_" in v)), ("Blocks", ".", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "blocks_gnn_modelfree_" in v)), ("Painting", "*", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "painting_gnn_modelfree_" in v)), ("Tools", "s", lambda df: df["EXPERIMENT_ID"].apply( lambda v: "tools_gnn_modelfree_" in v)), ], } # If True, add (0, 0) to every plot ADD_ZERO_POINT = True Y_LIM = (-5, 110) #################### Should not need to change below here ##################### def _main() -> None: outdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "results") os.makedirs(outdir, exist_ok=True) matplotlib.rcParams.update({'font.size': FONT_SIZE}) grouped_means, grouped_stds, _ = create_dataframes(COLUMN_NAMES_AND_KEYS, GROUPS, DERIVED_KEYS) means = grouped_means.reset_index() stds = grouped_stds.reset_index() for x_key, x_label in X_KEY_AND_LABEL: for y_key, y_label in Y_KEY_AND_LABEL: for plot_title, d in PLOT_GROUPS.items(): _, ax = plt.subplots() for label, marker, selector in d: exp_means = get_df_for_entry(x_key, means, selector) exp_stds = get_df_for_entry(x_key, stds, selector) xs = exp_means[x_key].tolist() ys = exp_means[y_key].tolist() y_stds = exp_stds[y_key].tolist() if ADD_ZERO_POINT: xs = [0] + xs ys = [0] + ys y_stds = [0] + y_stds ax.errorbar(xs, ys, yerr=y_stds, label=label, marker=marker) ax.set_xticks(xs) ax.set_title(plot_title) ax.set_xlabel(x_label) ax.set_ylabel(y_label) ax.set_ylim(Y_LIM) plt.legend() plt.tight_layout() filename = f"{plot_title}_{x_key}_{y_key}.png" filename = filename.replace(" ", "_").lower() outfile = os.path.join(outdir, filename) plt.savefig(outfile, dpi=DPI) print(f"Wrote out to {outfile}") if __name__ == "__main__": _main()
37.456954
79
0.572313
708
5,656
4.303672
0.295198
0.059075
0.039383
0.078766
0.218904
0.218904
0.207745
0.207745
0.207745
0.207745
0
0.004938
0.283946
5,656
150
80
37.706667
0.747407
0.161068
0
0.097345
0
0
0.233665
0.025746
0
0
0
0
0
1
0.00885
false
0
0.044248
0
0.053097
0.00885
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd43a1e72c9d194feac6f21f795a8c2f2065d1a1
85,638
py
Python
pyaedt/modeler/stackup_3d.py
pyansys/pyaedt
c7b045fede6bc707fb20a8db7d5680c66d8263f6
[ "MIT" ]
38
2021-10-01T23:15:26.000Z
2022-03-30T18:14:41.000Z
pyaedt/modeler/stackup_3d.py
pyansys/pyaedt
c7b045fede6bc707fb20a8db7d5680c66d8263f6
[ "MIT" ]
362
2021-09-30T17:11:55.000Z
2022-03-31T13:36:20.000Z
pyaedt/modeler/stackup_3d.py
pyansys/pyaedt
c7b045fede6bc707fb20a8db7d5680c66d8263f6
[ "MIT" ]
15
2021-09-30T20:21:02.000Z
2022-02-21T20:22:03.000Z
import os
from collections import OrderedDict

try:
    import joblib
except ImportError:
    pass

try:
    import numpy as np
except ImportError:
    pass

from pyaedt import constants
from pyaedt.generic.general_methods import generate_unique_name
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modules.MaterialLib import Material
from pyaedt.generic.general_methods import is_ironpython

LAYERS = {"s": "signal", "g": "ground", "d": "dielectric"}


def _replace_by_underscore(character, string):
    if not isinstance(character, str):
        raise TypeError("character must be str")
    if not isinstance(string, str):
        raise TypeError("string must be str")
    reformat_name = list(string)
    while character in reformat_name:
        index = reformat_name.index(character)
        reformat_name[index] = "_"
    return "".join(reformat_name)


class NamedVariable(object):
    """Cast PyAEDT variable object to simplify getters and setters in Stackup3D.

    Parameters
    ----------
    application : :class:`pyaedt.hfss.Hfss`
        HFSS design or project where the variable is to be created.
    name : str
        The name of the variable. If the name begins with an '$', the variable
        will be a project variable. Otherwise, it will be a design variable.
    expression : str
        Expression of the value.

    Examples
    --------
    >>> from pyaedt import Hfss
    >>> from pyaedt.modeler.stackup_3d import Stackup3D
    >>> hfss = Hfss()
    >>> my_frequency = NamedVariable(hfss, "my_frequency", "900000Hz")
    >>> wave_length_formula = "c0/" + my_frequency.name
    >>> my_wave_length = NamedVariable(hfss, "my_wave_length", wave_length_formula)
    >>> my_permittivity = NamedVariable(hfss, "my_permittivity", "2.2")
    >>> my_wave_length.expression = my_wave_length.expression + "/" + my_permittivity.name
    """

    def __init__(self, application, name, expression):
        self._application = application
        self._name = name
        self._expression = expression
        application[name] = expression

    @property
    def _variable(self):
        return self._application.variable_manager.variables[self._name]

    @property
    def name(self):
        """Name of the variable as a string."""
        return self._name

    @property
    def expression(self):
        """Expression of the variable as a string."""
        return self._expression

    @expression.setter
    def expression(self, expression):
        """Set the expression of the variable.

        Parameters
        ----------
        expression : str
            Value expression of the variable.
        """
        if isinstance(expression, str):
            self._expression = expression
            self._application[self.name] = expression
        else:
            self._application.logger.error("Expression must be a string")

    @property
    def unit_system(self):
        """Unit system of the expression as a string."""
        return self._variable.unit_system

    @property
    def units(self):
        """Units."""
        return self._variable.units

    @property
    def value(self):
        """Value."""
        return self._variable.value

    @property
    def numeric_value(self):
        """Numeric part of the expression as a float value."""
        return self._variable.numeric_value

    @property
    def evaluated_value(self):
        """String that combines the numeric value and the units."""
        return self._variable.evaluated_value

    @pyaedt_function_handler()
    def hide_variable(self, value=True):
        """Set the variable to a hidden variable.

        Parameters
        ----------
        value : bool, optional
            Whether the variable is a hidden variable. The default is ``True``.

        Returns
        -------
        bool
        """
        self._application.variable_manager[self._name].hidden = value
        return True

    @pyaedt_function_handler()
    def read_only_variable(self, value=True):
        """Set the variable to a read-only variable.

        Parameters
        ----------
        value : bool, optional
            Whether the variable is a read-only variable. The default is ``True``.

        Returns
        -------
        bool
        """
        self._application.variable_manager[self._name].read_only = value
        return True


class Layer3D(object):
    """Provides a class for a management of a parametric layer in 3D Modeler."""

    def __init__(
        self,
        stackup,
        app,
        name,
        layer_type="S",
        material="copper",
        thickness=0.035,
        fill_material="FR4_epoxy",
        index=1,
    ):
        self._stackup = stackup
        self._index = index
        self._app = app
        self._name = name
        layer_position = "layer_" + name + "_position"
        self._position = NamedVariable(app, layer_position, "0mm")
        self._thickness = None
        self._layer_type = LAYERS.get(layer_type.lower())

        self._obj_3d = []
        obj_3d = None
        self._material = self.duplicate_parametrize_material(material)
        self._material_name = self._material.name
        if self._layer_type != "dielectric":
            self._fill_material = self.duplicate_parametrize_material(fill_material)
            self._fill_material_name = self._fill_material.name
        self._thickness_variable = self._name + "_thickness"
        if thickness:
            self._thickness = NamedVariable(self._app, self._thickness_variable, str(thickness) + "mm")
        if self._layer_type == "dielectric":
            obj_3d = self._app.modeler.create_box(
                ["dielectric_x_position", "dielectric_y_position", layer_position],
                ["dielectric_length", "dielectric_width", self._thickness_variable],
                name=self._name,
                matname=self._material_name,
            )
        elif self._layer_type == "ground":
            if thickness:
                obj_3d = self._app.modeler.create_box(
                    ["dielectric_x_position", "dielectric_y_position", layer_position],
                    ["dielectric_length", "dielectric_width", self._thickness_variable],
                    name=self._name,
                    matname=self._material_name,
                )
            else:
                obj_3d = self._app.modeler.create_rectangle(
                    "Z",
                    ["dielectric_x_position", "dielectric_y_position", layer_position],
                    ["dielectric_length", "dielectric_width"],
                    name=self._name,
                    matname=self._material_name,
                )
        elif self._layer_type == "signal":
            if thickness:
                obj_3d = self._app.modeler.create_box(
                    ["dielectric_x_position", "dielectric_y_position", layer_position],
                    ["dielectric_length", "dielectric_width", self._thickness_variable],
                    name=self._name,
                    matname=self._fill_material,
                )
            else:
                obj_3d = self._app.modeler.create_rectangle(
                    "Z",
                    ["dielectric_x_position", "dielectric_y_position", layer_position],
                    ["dielectric_length", "dielectric_width"],
                    name=self._name,
                    matname=self._fill_material,
                )
        if obj_3d:
            obj_3d.group_name = "Layer_{}".format(self._name)
            self._obj_3d.append(obj_3d)
        else:
            self._app.logger.error("Generation of the ground layer does not work.")

    @property
    def name(self):
        """Layer name.

        Returns
        -------
        str
        """
        return self._name

    @property
    def number(self):
        """Layer ID.

        Returns
        -------
        int
        """
        return self._index

    @property
    def material_name(self):
        """Material name.

        Returns
        -------
        str
        """
        return self._material_name

    @property
    def material(self):
        """Material.

        Returns
        -------
        :class:`pyaedt.modules.Material.Material`
            Material.
        """
        return self._material

    @property
    def filling_material(self):
        """Fill material.

        Returns
        -------
        :class:`pyaedt.modules.Material.Material`
            Material.
        """
        return self._fill_material

    @property
    def filling_material_name(self):
        """Fill material name.

        Returns
        -------
        str
        """
        return self._fill_material_name

    @property
    def thickness(self):
        """Thickness variable.

        Returns
        -------
        :class:`pyaedt.modeler.stackup_3d.NamedVariable`
            Variable Object.
        """
        return self._thickness

    @property
    def thickness_value(self):
        """Thickness value.

        Returns
        -------
        float, str
        """
        return self._thickness.value

    @thickness.setter
    def thickness(self, value):
        self._thickness.expression = value

    @property
    def elevation(self):
        """Layer elevation.

        Returns
        -------
        :class:`pyaedt.modeler.stackup_3d.NamedVariable`
            Variable object.
        """
        return self._position

    @property
    def elevation_value(self):
        """Layer elevation value.

        Returns
        -------
        str, float
        """
        return self._app.variable_manager[self._position.name].value

    @pyaedt_function_handler()
    def duplicate_parametrize_material(self, material_name, cloned_material_name=None, list_of_properties=None):
        """Duplicate a material and parametrize all properties.

        Parameters
        ----------
        material_name : str
            Name of origin material
        cloned_material_name : str, optional
            Name of destination material. The default is ``None``.
        list_of_properties : list, optional
            Properties to parametrize. The default is ``None``.

        Returns
        -------
        :class:`pyaedt.modules.Material.Material`
            Material object.
        """
        application = self._app
        if isinstance(material_name, Material):
            return material_name
        if isinstance(cloned_material_name, Material):
            return cloned_material_name
        if self._app.materials.checkifmaterialexists(material_name):
            if not cloned_material_name:
                cloned_material_name = "cloned_" + material_name
            if not self._app.materials.checkifmaterialexists(cloned_material_name):
                if not list_of_properties:
                    cloned_material = application.materials.duplicate_material(material_name, cloned_material_name)
                    permittivity = cloned_material.permittivity.value
                    permeability = cloned_material.permeability.value
                    conductivity = cloned_material.conductivity.value
                    dielectric_loss_tan = cloned_material.dielectric_loss_tangent.value
                    magnetic_loss_tan = cloned_material.magnetic_loss_tangent.value
                    reformat_name = _replace_by_underscore(" ", cloned_material_name)
                    reformat_name = _replace_by_underscore("(", reformat_name)
                    reformat_name = _replace_by_underscore(")", reformat_name)
                    reformat_name = _replace_by_underscore("/", reformat_name)
                    reformat_name = _replace_by_underscore("-", reformat_name)
                    reformat_name = _replace_by_underscore(".", reformat_name)
                    reformat_name = _replace_by_underscore(",", reformat_name)
                    permittivity_variable = "$" + reformat_name + "_permittivity"
                    permeability_variable = "$" + reformat_name + "_permeability"
                    conductivity_variable = "$" + reformat_name + "_conductivity"
                    dielectric_loss_variable = "$" + reformat_name + "_dielectric_loss"
                    magnetic_loss_variable = "$" + reformat_name + "_magnetic_loss"
                    application[permittivity_variable] = str(permittivity)
                    application[permeability_variable] = str(permeability)
                    application[conductivity_variable] = str(conductivity)
                    application[dielectric_loss_variable] = str(dielectric_loss_tan)
                    application[magnetic_loss_variable] = str(magnetic_loss_tan)
                    cloned_material.permittivity = permittivity_variable
                    cloned_material.permeability = permeability_variable
                    cloned_material.conductivity = conductivity_variable
                    cloned_material.dielectric_loss_tangent = dielectric_loss_variable
                    cloned_material.magnetic_loss_tangent = magnetic_loss_variable
                    return cloned_material
            else:
                return application.materials[cloned_material_name]
        else:
            application.logger.error("The material name %s doesn't exist" % material_name)
            return None

    @pyaedt_function_handler()
    def add_patch(
        self,
        frequency,
        patch_width,
        patch_length=None,
        patch_position_x=0,
        patch_position_y=0,
        patch_name=None,
        axis="X",
    ):
        """Create a parametric patch.

        Parameters
        ----------
        frequency : float
            Frequency value for the patch calculation in Hz.
        patch_width : float
            Patch width.
        patch_length : float, optional
            Patch length. The default is ``None``.
        patch_position_x : float, optional
            Patch start x position.
        patch_position_y : float, optional
            Patch start y position.
The default is ``0``. patch_name : str, optional Patch name. The default is ``None``. axis : str, optional Line orientation axis. The default is ``"X"``. Returns ------- :class:`pyaedt.modeler.stackup_3d.Patch` """ if not patch_name: patch_name = generate_unique_name("{}_patch".format(self._name), n=3) lst = self._stackup._layer_name for i in range(len(lst)): if lst[i] == self._name: if self._stackup.stackup_layers[lst[i - 1]]._layer_type == "dielectric": below_layer = self._stackup.stackup_layers[lst[i - 1]] break else: self._app.logger.error("The layer below the selected one must be of dielectric type") return False created_patch = Patch( self._app, frequency, patch_width, signal_layer=self, dielectric_layer=below_layer, patch_length=patch_length, patch_position_x=patch_position_x, patch_position_y=patch_position_y, patch_name=patch_name, axis=axis, ) self._obj_3d.append(created_patch.aedt_object) self._stackup._object_list.append(created_patch) created_patch.aedt_object.group_name = "Layer_{}".format(self._name) return created_patch @pyaedt_function_handler() def ml_patch( self, frequency, patch_width, patch_position_x=0, patch_position_y=0, patch_name=None, axis="X", ): """Create a new parametric patch using a machine learning algorithm rather than analytic formulas. Parameters ---------- frequency : float Frequency value for patch calculation in Hz. patch_width : float Patch width. patch_position_x : float, optional Patch start x position. The default is ``0``. patch_position_y : float, optional Patch start y position. The default is ``0``. patch_name : str, optional Patch name. The default is ``None``. axis : str, optional Line orientation axis. The default is ``"X"``. Returns ------- :class:`pyaedt.modeler.stackup_3d.Patch` """ if not patch_name: patch_name = generate_unique_name("{}_patch".format(self._name), n=3) lst = self._stackup._layer_name for i in range(len(lst)): if lst[i] == self._name: if self._stackup.stackup_layers[lst[i - 1]]._layer_type == "dielectric": below_layer = self._stackup.stackup_layers[lst[i - 1]] break else: self._app.logger.error("The layer below the selected one must be of dielectric type") return False created_patch = MachineLearningPatch( self._app, frequency, patch_width, signal_layer=self, dielectric_layer=below_layer, patch_position_x=patch_position_x, patch_position_y=patch_position_y, patch_name=patch_name, axis=axis, ) self._obj_3d.append(created_patch.aedt_object) self._stackup._object_list.append(created_patch) created_patch.aedt_object.group_name = "Layer_{}".format(self._name) return created_patch @pyaedt_function_handler() def add_trace( self, line_width, line_length, is_electrical_length=False, is_impedance=False, line_position_x=0, line_position_y=0, line_name=None, axis="X", reference_system=None, frequency=1e9, ): """Create a trace. Parameters ---------- line_width : float Line width. It can be the physical width or the line impedance. line_length : float Line length. It can be the physical length or the electrical length. is_electrical_length : bool, optional Whether the line length is an electrical length or a physical length. The default is ``False``, which means it is a physical length. is_impedance : bool, optional Whether the line width is an impedance. The default is ``False``, in which case the line width is a geometrical value. line_position_x : float, optional Line center start x position. The default is ``0``. line_position_y : float, optional Line center start y position. The default is ``0``. line_name : str, optional Line name. The default is ``None``. 
axis : str, optional Line orientation axis. The default is ``"X"``. reference_system : str, optional Line reference system. The default is ``None``, in which case a new coordinate system is created. frequency : float, optional Frequency value for the line calculation in Hz. The default is ``1e9``. Returns ------- :class:`pyaedt.modeler.stackup_3d.Line` """ if not line_name: line_name = generate_unique_name("{0}_line".format(self._name), n=3) dielectric_layer = None for v in list(self._stackup._stackup.values()): if v._index == self._index - 1: dielectric_layer = v break if dielectric_layer is None: self._app.logger.error("There is no layer under this layer.") created_line = Trace( self._app, frequency, line_width if is_impedance else None, line_width if not is_impedance else None, self, dielectric_layer, line_length=line_length if not is_electrical_length else None, line_electrical_length=line_length if is_electrical_length else None, line_position_x=line_position_x, line_position_y=line_position_y, line_name=line_name, reference_system=reference_system, axis=axis, ) created_line.aedt_object.group_name = "Layer_{}".format(self._name) self._obj_3d.append(created_line.aedt_object) self._stackup._object_list.append(created_line) return created_line @pyaedt_function_handler() def add_polygon(self, points, material="copper", is_void=False, poly_name=None): """Create a polygon. Parameters ---------- points : list Points list of [x,y] coordinates. material : str, optional Material name. The default is ``"copper"``. is_void : bool, optional Whether the polygon is a void. The default is ``False``. On ground layers, it will act opposite of the Boolean value because the ground is negative. poly_name : str, optional Polygon name. The default is ``None``. Returns ------- """ if not poly_name: poly_name = generate_unique_name("{0}_poly".format(self._name), n=3) polygon = Polygon( self._app, points, thickness=self._thickness, signal_layer_name=self._name, mat_name=material, is_void=is_void, poly_name=poly_name, ) polygon.aedt_object.group_name = "Layer_{}".format(self._name) if self._layer_type == "ground": if not is_void: if polygon.aedt_object.is3d: self._app.modeler[self._name].subtract(polygon.aedt_object, True) polygon.aedt_object.material_name = self.filling_material_name else: self._app.modeler[self._name].subtract(polygon.aedt_object, False) return True elif is_void: if polygon.aedt_object.is3d: self._app.modeler.subtract(self._obj_3d, polygon.aedt_object, True) polygon.aedt_object.material_name = self.filling_material_name else: self._app.modeler[self._name].subtract(polygon.aedt_object, False) return True else: self._app.modeler.subtract(self._obj_3d[0], polygon.aedt_object, True) self._obj_3d.append(polygon.aedt_object) self._stackup._object_list.append(polygon) return polygon class PadstackLayer(object): """Provides a data class for the definition of a padstack layer and relative pad and antipad values.""" def __init__(self, padstack, layer_name, elevation, thickness): self._padstack = padstack self._layer_name = layer_name self._layer_elevation = elevation self._layer_thickness = thickness self._pad_radius = 1 self._antipad_radius = 2 self._units = "mm" class Padstack(object): """Padstack Class member of Stackup3D.""" def __init__(self, app, stackup, name, material="copper"): self._app = app self._stackup = stackup self.name = name self._padstacks_by_layer = OrderedDict({}) self._vias_objects = [] self._num_sides = 16 self._plating_ratio = 1 v = None k = None for k, v in 
self._stackup.stackup_layers.items(): if not self._padstacks_by_layer and v._layer_type == "dielectric": continue self._padstacks_by_layer[k] = PadstackLayer(self, k, v.elevation, v.thickness) if v and v._layer_type == "dielectric": del self._padstacks_by_layer[k] self._padstacks_material = material @property def plating_ratio(self): """Plating ratio between 0 and 1. Returns ------- float """ return self._plating_ratio @plating_ratio.setter def plating_ratio(self, val): if isinstance(val, (float, int)) and val > 0 and val <= 1: self._plating_ratio = val elif isinstance(val, str): self._plating_ratio = val else: self._app.logger.error("Plating ratio must be between 0 and 1.") @property def num_sides(self): """Number of sides on the circle, which is 0 for a true circle. Returns ------- int """ return self._num_sides @num_sides.setter def num_sides(self, val): self._num_sides = val @pyaedt_function_handler() def set_all_pad_value(self, value): """Set all pads in all layers to a specified value. Parameters ---------- value : float Pad radius. Returns ------- bool ``True`` when successful, ``False`` when failed. """ for v in list(self._padstacks_by_layer.values()): v._pad_radius = value return True @pyaedt_function_handler() def set_all_antipad_value(self, value): """Set all antipads in all layers to a specified value. Parameters ---------- value : float Antipad radius. Returns ------- bool ``True`` when successful, ``False`` when failed. """ for v in list(self._padstacks_by_layer.values()): v._antipad_radius = value return True @pyaedt_function_handler() def set_start_layer(self, layer): """Set the start layer to a specified value. Parameters ---------- layer : str Layer name. Returns ------- bool ``True`` when successful, ``False`` when failed. """ found = False new_stackup = OrderedDict({}) for k, v in self._stackup.stackup_layers.items(): if k == layer: found = True if found and layer not in self._padstacks_by_layer: new_stackup[k] = PadstackLayer(self, k, v.elevation, v.thickness) elif found: new_stackup[k] = self._padstacks_by_layer[k] self._padstacks_by_layer = new_stackup return True @pyaedt_function_handler() def set_stop_layer(self, layer): """Set the stop layer to a specified value. Parameters ---------- layer : str Layer name. Returns ------- bool ``True`` when successful, ``False`` when failed. """ found = False new_stackup = OrderedDict({}) for k in list(self._stackup.stackup_layers.keys()): if k == layer: found = True if not found and k in list(self._padstacks_by_layer.keys()): new_stackup[k] = self._padstacks_by_layer[k] self._padstacks_by_layer = new_stackup return True @pyaedt_function_handler() def add_via(self, position_x=0, position_y=0, instance_name=None, reference_system=None): """Insert a new via on this padstack. Parameters ---------- position_x : float, optional Center x position. The default is ``0``. position_y : float, optional Center y position. The default is ``0``. instance_name : str, optional Via name. The default is ``None``. reference_system : str, optional Whether to use an existing reference system or create a new one. The default is ``None``, in which case a new reference system is created. Returns ------- :class:`pyaedt.modeler.Object3d.Object3d` Object created. 
""" if not instance_name: instance_name = generate_unique_name("{}_".format(self.name), n=3) if reference_system: self._app.modeler.set_working_coordinate_system(reference_system) self._reference_system = reference_system else: self._app.modeler.create_coordinate_system( origin=[0, 0, 0], reference_cs="Global", name=instance_name + "_CS" ) self._app.modeler.set_working_coordinate_system(instance_name + "_CS") self._reference_system = instance_name + "_CS" first_el = None cyls = [] for v in list(self._padstacks_by_layer.values()): if not first_el: first_el = v._layer_elevation else: position_x = self._app.modeler._arg_with_dim(position_x) position_y = self._app.modeler._arg_with_dim(position_y) cyls.append( self._app.modeler.create_cylinder( "Z", [position_x, position_y, v._layer_elevation.name], v._pad_radius, v._layer_thickness.name, matname=self._padstacks_material, name=instance_name, numSides=self._num_sides, ) ) if self.plating_ratio < 1: hole = self._app.modeler.create_cylinder( "Z", [position_x, position_y, v._layer_elevation.name], "{}*{}".format(self._app.modeler._arg_with_dim(v._pad_radius), 1 - self.plating_ratio), v._layer_thickness.name, matname=self._padstacks_material, name=instance_name, numSides=self._num_sides, ) cyls[-1].subtract(hole, False) anti = self._app.modeler.create_cylinder( "Z", [position_x, position_y, v._layer_elevation.name], v._antipad_radius, v._layer_thickness.name, matname="air", name=instance_name + "_antipad", ) self._app.modeler.subtract( self._stackup._signal_list + self._stackup._ground_list + self._stackup._dielectric_list, anti, False, ) first_el = v._layer_elevation if len(cyls) > 1: self._app.modeler.unite(cyls) self._vias_objects.append(cyls[0]) cyls[0].group_name = "Vias" self._stackup._vias.append(self) return cyls[0] class Stackup3D(object): """Main Stackup3D Class.""" def __init__(self, application): self._app = application self._layer_name = [] self._layer_position = [] self._dielectric_list = [] self._dielectric_name_list = [] self._ground_list = [] self._ground_name_list = [] self._ground_fill_material = [] self._signal_list = [] self._signal_name_list = [] self._signal_material = [] self._object_list = [] self._vias = [] self._end_of_stackup3D = NamedVariable(self._app, "StackUp_End", "0mm") self._z_position_offset = 0 self._first_layer_position = "layer_1_position" self._shifted_index = 0 self._stackup = OrderedDict({}) self._start_position = NamedVariable(self._app, self._first_layer_position, "0mm") self._dielectric_x_position = NamedVariable(self._app, "dielectric_x_position", "0mm") self._dielectric_y_position = NamedVariable(self._app, "dielectric_y_position", "0mm") self._dielectric_width = NamedVariable(self._app, "dielectric_width", "1000mm") self._dielectric_length = NamedVariable(self._app, "dielectric_length", "1000mm") self._padstacks = [] @property def padstacks(self): """List of padstacks created. Returns ------- List """ return self._padstacks @property def dielectrics(self): """List of dielectrics created. Returns ------- List """ return self._dielectric_list @property def grounds(self): """List of grounds created. Returns ------- List """ return self._ground_list @property def signals(self): """List of signals created. Returns ------- List """ return self._signal_list @property def objects(self): """List of obects created. Returns ------- List """ return self._object_list @property def objects_by_layer(self): """List of padstacks created. 
Returns ------- dict """ objs = {} for obj in self.objects: if objs.get(obj.layer_name, None): objs[obj.layer_name].append(obj) else: objs[obj.layer_name] = [obj] return objs @property def start_position(self): """Variable containing the start position. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` """ return self._start_position @start_position.setter def start_position(self, expression): self._start_position.expression = expression @property def dielectric_x_position(self): """Stackup x origin. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._dielectric_x_position @dielectric_x_position.setter def dielectric_x_position(self, expression): self._dielectric_x_position.expression = expression @property def dielectric_y_position(self): """Stackup y origin. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._dielectric_y_position @dielectric_y_position.setter def dielectric_y_position(self, expression): self._dielectric_y_position.expression = expression @property def dielectric_width(self): """Stackup width. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._dielectric_width @dielectric_width.setter def dielectric_width(self, expression): self._dielectric_width.expression = expression @property def dielectric_length(self): """Stackup length. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._dielectric_length @dielectric_length.setter def dielectric_length(self, expression): self._dielectric_length.expression = expression @property def layer_names(self): """List of all layer names. Returns ------- list """ return self._layer_name @property def layer_positions(self): """List of all layer positions. Returns ------- List """ return self._layer_position @property def stackup_layers(self): """Dictionary of all stackup layers. Returns ------- dict """ return self._stackup @property def z_position_offset(self): """Elevation. Returns ------- float """ return self._z_position_offset @pyaedt_function_handler() def add_padstack(self, name, material="copper"): """Add a new padstack definition. Parameters ---------- name : str Padstack name. material : str, optional Padstack material. The default is ``"copper"``. Returns ------- :class:`pyaedt.modeler.stackup_3d.Padstack` """ p = Padstack(self._app, self, name, material) self._padstacks.append(p) return p @pyaedt_function_handler() def add_layer(self, name, layer_type="S", material="copper", thickness=0.035, fill_material="FR4_epoxy"): """Add a new layer to the stackup. The new layer can be a signal (S), ground (G), or dielectric (D). The layer is entirely filled with the specified fill material. Any object drawn on the layer is assigned the layer material. Parameters ---------- name : str Layer name. layer_type : str, optional Layer type. Options are ``"S"``, ``"D"``, and ``"G"``. The default is ``"S"``. material : str, optional Material name. The default is ``"copper"``. The material will be parametrized. thickness : float, optional Thickness value. The default is ``0.035``. The thickness will be parametrized. fill_material : str, optional Fill material name. The default is ``"FR4_epoxy"``. The fill material will be parametrized. This parameter is not valid for dielectrics. Returns ------- :class:`pyaedt.modeler.stackup_3d.Layer3D` Layer object. 
""" self._shifted_index += 1 if not layer_type: raise ValueError("Layer type has to be an S, D, or G string.") self._layer_name.append(name) lay = Layer3D( stackup=self, app=self._app, name=name, layer_type=layer_type, material=material, thickness=thickness, fill_material=fill_material, index=self._shifted_index, ) self._layer_position_manager(lay) if layer_type == "D": self._dielectric_list.extend(lay._obj_3d) self._dielectric_name_list.append(lay._name) lay._obj_3d[-1].transparency = "0.8" elif layer_type == "G": self._ground_list.extend(lay._obj_3d) self._ground_name_list.append(lay._name) self._ground_fill_material.append(lay._fill_material) lay._obj_3d[-1].transparency = "0.6" lay._obj_3d[-1].color = (255, 0, 0) elif layer_type == "S": self._signal_list.extend(lay._obj_3d) self._signal_name_list.append(lay._name) self._signal_material.append(lay._material_name) # With the function _layer_position_manager i think this part is not needed anymore or has to be reworked lay._obj_3d[-1].transparency = "0.8" self._stackup[lay._name] = lay return lay @pyaedt_function_handler() def add_signal_layer(self, name, material="copper", thickness=0.035, fill_material="FR4_epoxy"): """Add a new ground layer to the stackup. A signal layer is positive. The layer is entirely filled with the fill material. Anything will be drawn wmaterial. Parameters ---------- name : str Layer name. material : str Material name. Material will be parametrized. thickness : float Thickness value. Thickness will be parametrized. fill_material : str Fill Material name. Material will be parametrized.= material : str, optional Material name. Material will be parametrized. Default value is `"copper"`. thickness : float, optional Thickness value. Thickness will be parametrized. Default value is `0.035`. fill_material : str, optional Fill material name. Material will be parametrized. Default value is `"FR4_epoxy"`. Returns ------- :class:`pyaedt.modeler.stackup_3d.Layer3D` Layer object. """ return self.add_layer( name=name, layer_type="S", material=material, thickness=thickness, fill_material=fill_material ) @pyaedt_function_handler() def add_dielectric_layer( self, name, material="FR4_epoxy", thickness=0.035, ): """Add a new dielectric layer to the stackup. Parameters ---------- name : str Layer name. material : str Material name. The default is ``"FR4_epoxy"``. The material will be parametrized. thickness : float, optional Thickness value. The default is ``0.035``. The thickness will be parametrized. Returns ------- :class:`pyaedt.modeler.stackup_3d.Layer3D` Layer 0bject. """ return self.add_layer(name=name, layer_type="D", material=material, thickness=thickness, fill_material=None) @pyaedt_function_handler() def add_ground_layer(self, name, material="copper", thickness=0.035, fill_material="air"): """Add a new ground layer to the stackup. A ground layer is negative. The layer is entirely filled with metal. Any polygon will draw a void in it. Parameters ---------- name : str Layer name. material : str Material name. Material will be parametrized. thickness : float Thickness value. Thickness will be parametrized. fill_material : str Fill Material name. Material will be parametrized. Returns ------- :class:`pyaedt.modeler.stackup_3d.Layer3D` Layer Object. 
""" return self.add_layer( name=name, layer_type="G", material=material, thickness=thickness, fill_material=fill_material ) @pyaedt_function_handler() def _layer_position_manager(self, layer): """ Parameters ---------- layer Returns ------- """ previous_layer_end = self._end_of_stackup3D.expression layer.elevation.expression = previous_layer_end if layer.thickness: self._end_of_stackup3D.expression = layer.elevation.name + " + " + layer.thickness.name else: self._end_of_stackup3D.expression = layer.elevation.name # if we call this function instantiation of the Layer, the first call, previous_layer_end is "0mm", and # layer.position.expression is also "0mm" and self._end_of_stackup becomes the first layer.position + thickness # if it has thickness, and so the second call, previous_layer_end is the previous layer position + thickness # so the current layer position is the previous_layer_end and the end_of_stackup is the current layer position + # thickness, and we just need to call this function after the construction of a layer3D. @pyaedt_function_handler() def resize(self, percentage_offset): """Resize the stackup around objects created by a percentage offset. Parameters ---------- percentage_offset : float Offset of resize. The value must be greater than 0. Returns ------- bool """ list_of_2d_points = [] list_of_x_coordinates = [] list_of_y_coordinates = [] for obj3d in self._object_list: points_list_by_object = obj3d.points_on_layer list_of_2d_points = points_list_by_object + list_of_2d_points for via in self._vias: for v in via._vias_objects: list_of_x_coordinates.append(v.bounding_box[0] - v.bounding_dimension[0]) list_of_x_coordinates.append(v.bounding_box[3] - v.bounding_dimension[0]) list_of_y_coordinates.append(v.bounding_box[1] - v.bounding_dimension[1]) list_of_y_coordinates.append(v.bounding_box[4] - v.bounding_dimension[1]) list_of_x_coordinates.append(v.bounding_box[0] + v.bounding_dimension[0]) list_of_x_coordinates.append(v.bounding_box[4] + v.bounding_dimension[0]) list_of_y_coordinates.append(v.bounding_box[4] + v.bounding_dimension[1]) list_of_y_coordinates.append(v.bounding_box[1] + v.bounding_dimension[1]) for point in list_of_2d_points: list_of_x_coordinates.append(point[0]) list_of_y_coordinates.append(point[1]) maximum_x = max(list_of_x_coordinates) minimum_x = min(list_of_x_coordinates) maximum_y = max(list_of_y_coordinates) minimum_y = min(list_of_y_coordinates) variation_x = abs(maximum_x - minimum_x) variation_y = abs(maximum_y - minimum_y) self._app["dielectric_x_position"] = str(minimum_x - variation_x * percentage_offset / 100) + "mm" self._app["dielectric_y_position"] = str(minimum_y - variation_y * percentage_offset / 100) + "mm" self._app["dielectric_length"] = str(maximum_x - minimum_x + 2 * variation_x * percentage_offset / 100) + "mm" self._app["dielectric_width"] = str(maximum_y - minimum_y + 2 * variation_y * percentage_offset / 100) + "mm" return True def resize_around_element(self, element, percentage_offset=0.25): """Resize the stackup around objects and make it parametrize. Parameters ---------- element : :class:`pyaedt.modeler.stackup_3d.Patch Element around which the resizing is done. percentage_offset : float, optional Offset of resize. Value accepted are greater than 0. O.25 by default. 
Returns ------- bool """ self._app["dielectric_x_position"] = ( element.position_x.name + " - " + element.length.name + " * " + str(percentage_offset) ) self._app["dielectric_y_position"] = ( element.position_y.name + " - " + element.width.name + " * (0.5 + " + str(percentage_offset) + ")" ) self._app["dielectric_length"] = element.length.name + " * (1 + " + str(percentage_offset) + " * 2)" self._app["dielectric_width"] = element.width.name + " * (1 + " + str(percentage_offset) + " * 2)" return True class CommonObject(object): """CommonObject Class in Stackup3D.""" def __init__(self, application): self._application = application self._name = None self._dielectric_layer = None self._signal_layer = None self._aedt_object = None self._layer_name = None self._layer_number = None self._material_name = None self._reference_system = None @property def reference_system(self): """Coordinate system of the object. Returns ------- str """ return self._reference_system @property def dielectric_layer(self): """Dielectric layer that the object belongs to. Returns ------- :class:`pyaedt.modeler.stackup_3d.Layer3D` """ return self._dielectric_layer @property def signal_layer(self): """Signal layer that the object belongs to. Returns ------- :class:`pyaedt.modeler.stackup_3d.Layer3D` """ return self._signal_layer @property def name(self): """Object name. Returns ------- str """ return self._name @property def application(self): """App object.""" return self._application @property def aedt_object(self): """PyAEDT object 3D. Returns ------- :class:`pyaedt.modeler.Object3d.Object3d` """ return self._aedt_object @property def layer_name(self): """Layer name. Returns ------- str """ return self._layer_name @property def layer_number(self): """Layer ID. Returns ------- int """ return self._layer_number @property def material_name(self): """Material name. Returns ------- str """ return self._material_name @property def points_on_layer(self): """Object bounding box. Returns ------- List List of [x,y] coordinate of the bounding box. 
""" bb = self._aedt_object.bounding_box return [[bb[0], bb[1]], [bb[0], bb[4]], [bb[3], bb[4]], [bb[3], bb[1]]] class Patch(CommonObject, object): """Patch Class in Stackup3D.""" def __init__( self, application, frequency, patch_width, signal_layer, dielectric_layer, patch_length=None, patch_position_x=0, patch_position_y=0, patch_name="patch", reference_system=None, axis="X", ): CommonObject.__init__(self, application) self._frequency = NamedVariable(application, patch_name + "_frequency", str(frequency) + "Hz") self._signal_layer = signal_layer self._dielectric_layer = dielectric_layer self._substrate_thickness = dielectric_layer.thickness self._width = NamedVariable(application, patch_name + "_width", application.modeler._arg_with_dim(patch_width)) self._position_x = NamedVariable( application, patch_name + "_position_x", application.modeler._arg_with_dim(patch_position_x) ) self._position_y = NamedVariable( application, patch_name + "_position_y", application.modeler._arg_with_dim(patch_position_y) ) self._position_z = signal_layer.elevation self._dielectric_layer = dielectric_layer self._signal_layer = signal_layer self._dielectric_material = dielectric_layer.material self._material_name = signal_layer.material_name self._layer_name = signal_layer.name self._layer_number = signal_layer.number self._name = patch_name self._patch_thickness = signal_layer.thickness self._application = application self._aedt_object = None try: self._permittivity = NamedVariable( application, patch_name + "_permittivity", float(self._dielectric_material.permittivity.value) ) except ValueError: self._permittivity = NamedVariable( application, patch_name + "_permittivity", float(application.variable_manager[self._dielectric_material.permittivity.value].value), ) if isinstance(patch_length, float) or isinstance(patch_length, int): self._length = NamedVariable( application, patch_name + "_length", application.modeler._arg_with_dim(patch_length) ) self._effective_permittivity = self._effective_permittivity_calcul self._wave_length = self._wave_length_calcul elif patch_length is None: self._effective_permittivity = self._effective_permittivity_calcul self._added_length = self._added_length_calcul self._wave_length = self._wave_length_calcul self._length = self._length_calcul self._impedance_l_w, self._impedance_w_l = self._impedance_calcul if reference_system: application.modeler.set_working_coordinate_system(reference_system) if axis == "X": start_point = [ "{0}_position_x".format(self._name), "{0}_position_y-{0}_width/2".format(self._name), 0, ] else: start_point = [ "{0}_position_x-{0}_width/2".format(self._name), "{}_position_y".format(self._name), 0, ] self._reference_system = reference_system else: application.modeler.create_coordinate_system( origin=[ "{0}_position_x".format(patch_name), "{}_position_y".format(patch_name), signal_layer.elevation.name, ], reference_cs="Global", name=patch_name + "_CS", ) if axis == "X": start_point = [0, "-{}_width/2".format(patch_name), 0] else: start_point = ["-{}_width/2".format(patch_name), 0, 0] application.modeler.set_working_coordinate_system(patch_name + "_CS") self._reference_system = patch_name + "_CS" if signal_layer.thickness: self._aedt_object = application.modeler.create_box( position=start_point, dimensions_list=[ "{}_length".format(patch_name), "{}_width".format(patch_name), signal_layer.thickness.name, ], name=patch_name, matname=signal_layer.material_name, ) else: self._aedt_object = application.modeler.create_rectangle( position=start_point, 
dimension_list=[self.length.name, self.width.name], name=patch_name, matname=signal_layer.material_name, ) application.assign_coating(self._aedt_object.name, signal_layer.material) application.modeler.set_working_coordinate_system("Global") application.modeler.subtract(blank_list=[signal_layer.name], tool_list=[patch_name], keepOriginals=True) @property def frequency(self): """Model frequency. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._frequency @property def substrate_thickness(self): """Substrate thickness. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._substrate_thickness @property def width(self): """Width. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._width @property def position_x(self): """Starting position X. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._position_x @property def position_y(self): """Starting position Y. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._position_y @property def permittivity(self): """Permittivity. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._permittivity @property def _permittivity_calcul(self): """Permittivity calculation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ self._permittivity = self.application.materials[self._dielectric_material].permittivity return self._permittivity @property def effective_permittivity(self): """Effective permittivity. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._effective_permittivity @property def _effective_permittivity_calcul(self): # "(substrat_permittivity + 1)/2 + (substrat_permittivity - # 1)/(2 * sqrt(1 + 10 * substrate_thickness/patch_width))" er = self._permittivity.name h = self._substrate_thickness.name w = self._width.name patch_eff_permittivity_formula = "(" + er + "+ 1)/2 + (" + er + "- 1)/(2 * sqrt(1 + 10 * " + h + "/" + w + "))" self._effective_permittivity = NamedVariable( self.application, self._name + "_eff_permittivity", patch_eff_permittivity_formula ) return self._effective_permittivity @property def added_length(self): """Added length calculation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ return self._added_length @property def _added_length_calcul(self): """Added length calculation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable object. """ # "0.412 * substrate_thickness * (patch_eff_permittivity + 0.3) * (patch_width/substrate_thickness + 0.264)" # " / ((patch_eff_permittivity - 0.258) * (patch_width/substrate_thickness + 0.813)) " er_e = self._effective_permittivity.name h = self._substrate_thickness.name w = self._width.name patch_added_length_formula = ( "0.412 * " + h + " * (" + er_e + " + 0.3) * (" + w + "/" + h + " + 0.264)/" "((" + er_e + " - 0.258) * (" + w + "/" + h + " + 0.813))" ) self._added_length = NamedVariable(self.application, self._name + "_added_length", patch_added_length_formula) return self._added_length @property def wave_length(self): """Wave length. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._wave_length @property def _wave_length_calcul(self): """Wave Length Calutation. 
Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # "c0 * 1000/(patch_frequency * sqrt(patch_eff_permittivity))" f = self._frequency.name er_e = self._effective_permittivity.name patch_wave_length_formula = "(c0 * 1000/(" + f + "* sqrt(" + er_e + ")))mm" self._wave_length = NamedVariable( self.application, self._name + "_wave_length", self.application.modeler._arg_with_dim(patch_wave_length_formula), ) return self._wave_length @property def length(self): """Length. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._length @property def _length_calcul(self): """Length Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # "patch_wave_length / 2 - 2 * patch_added_length" d_l = self._added_length.name lbd = self._wave_length.name patch_length_formula = lbd + "/2" + " - 2 * " + d_l self._length = NamedVariable(self.application, self._name + "_length", patch_length_formula) return self._length @property def impedance(self): """Impedance. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._impedance_l_w, self._impedance_w_l @property def _impedance_calcul(self): """Impedance Calculation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # "45 * (patch_wave_length/patch_width * sqrt(patch_eff_permittivity)) ** 2" # "60 * patch_wave_length/patch_width * sqrt(patch_eff_permittivity)" er_e = self._effective_permittivity.name lbd = self._wave_length.name w = self._width.name patch_impedance_formula_l_w = "45 * (" + lbd + "/" + w + "* sqrt(" + er_e + ")) ** 2" patch_impedance_formula_w_l = "60 * " + lbd + "/" + w + "* sqrt(" + er_e + ")" self._impedance_l_w = NamedVariable( self.application, self._name + "_impedance_l_w", patch_impedance_formula_l_w ) self._impedance_w_l = NamedVariable( self.application, self._name + "_impedance_w_l", patch_impedance_formula_w_l ) self.application.logger.warning( "The closer the ratio between wave length and the width is to 1," " the less correct the impedance calculation is" ) return self._impedance_l_w, self._impedance_w_l def create_lumped_port(self, reference_layer, opposite_side=False, port_name=None, axisdir=None): """Create a parametrized lumped port. Parameters ---------- reference_layer : class:`pyaedt.modeler.stackup_3d.Layer3D The reference layer, in most cases the ground layer. opposite_side : bool, optional Change the side where the port is created. port_name : str, optional Name of the lumped port. axisdir : int or :class:`pyaedt.application.Analysis.Analysis.AxisDir`, optional Position of the port. It should be one of the values for ``Application.AxisDir``, which are: ``XNeg``, ``YNeg``, ``ZNeg``, ``XPos``, ``YPos``, and ``ZPos``. The default is ``Application.AxisDir.XNeg``. 
Returns ------- bool """ string_position_x = self.position_x.name if opposite_side: string_position_x = self.position_x.name + " + " + self.length.name string_position_y = self.position_y.name + " - " + self.width.name + "/2" string_position_z = reference_layer.elevation.name string_width = self.width.name string_length = ( self._signal_layer.elevation.name + " + " + self._signal_layer.thickness.name + " - " + reference_layer.elevation.name ) port = self.application.modeler.create_rectangle( csPlane=constants.PLANE.YZ, position=[string_position_x, string_position_y, string_position_z], dimension_list=[string_width, string_length], name=self.name + "_port", matname=None, ) if self.application.solution_type == "Modal": if axisdir is None: axisdir = self.application.AxisDir.ZPos port = self.application.create_lumped_port_to_sheet(port.name, portname=port_name, axisdir=axisdir) elif self.application.solution_type == "Terminal": port = self.application.create_lumped_port_to_sheet( port.name, portname=port_name, reference_object_list=[reference_layer.name] ) return port class Trace(CommonObject, object): """Provides a class to create a trace in stackup.""" def __init__( self, application, frequency, line_impedance, line_width, signal_layer, dielectric_layer, line_length=None, line_electrical_length=90, line_position_x=0, line_position_y=0, line_name="line", reference_system=None, axis="X", ): CommonObject.__init__(self, application) self._frequency = NamedVariable(application, line_name + "_frequency", str(frequency) + "Hz") self._signal_layer = signal_layer self._dielectric_layer = dielectric_layer self._substrate_thickness = dielectric_layer.thickness self._position_x = NamedVariable( application, line_name + "_position_x", application.modeler._arg_with_dim(line_position_x) ) self._position_y = NamedVariable( application, line_name + "_position_y", application.modeler._arg_with_dim(line_position_y) ) self._position_z = signal_layer.elevation self._dielectric_material = dielectric_layer.material self._material_name = signal_layer.material_name self._layer_name = signal_layer.name self._layer_number = signal_layer.number self._name = line_name self._line_thickness = signal_layer.thickness self._width = None self._width_h_w = None self._axis = axis try: self._permittivity = NamedVariable( application, line_name + "_permittivity", float(self._dielectric_material.permittivity.value) ) except ValueError: self._permittivity = NamedVariable( application, line_name + "_permittivity", float(application.variable_manager[self._dielectric_material.permittivity.value].value), ) if isinstance(line_width, float) or isinstance(line_width, int): self._width = NamedVariable( application, line_name + "_width", application.modeler._arg_with_dim(line_width) ) self._effective_permittivity = self._effective_permittivity_calcul self._wave_length = self._wave_length_calcul self._added_length = self._added_length_calcul if isinstance(line_electrical_length, float) or isinstance(line_electrical_length, int): self._electrical_length = NamedVariable( application, line_name + "_elec_length", str(line_electrical_length) ) self._length = self._length_calcul elif isinstance(line_length, float) or isinstance(line_length, int): self._length = NamedVariable( application, line_name + "_length", application.modeler._arg_with_dim(line_length) ) self._electrical_length = self._electrical_length_calcul else: application.logger.error("line_length must be a float.") self._charac_impedance_w_h, self._charac_impedance_h_w = 
self._charac_impedance_calcul elif line_width is None: self._charac_impedance = NamedVariable( self.application, line_name + "_charac_impedance_h_w", str(line_impedance) ) self._width, self._width_h_w = self._width_calcul self._effective_permittivity = self._effective_permittivity_calcul self._wave_length = self._wave_length_calcul self._added_length = self._added_length_calcul if isinstance(line_electrical_length, float) or isinstance(line_electrical_length, int): self._electrical_length = NamedVariable( application, line_name + "_elec_length", str(line_electrical_length) ) self._length = self._length_calcul elif isinstance(line_length, float) or isinstance(line_length, int): self._length = NamedVariable( application, line_name + "_length", application.modeler._arg_with_dim(line_length) ) self._electrical_length = self._electrical_length_calcul else: application.logger.error("line_length must be a float.") if reference_system: application.modeler.set_working_coordinate_system(reference_system) if axis == "X": start_point = [ "{0}_position_x".format(self._name), "{0}_position_y-{0}_width/2".format(self._name), 0, ] else: start_point = [ "{0}_position_x-{0}_width/2".format(self._name), "{}_position_y".format(self._name), 0, ] self._reference_system = reference_system else: application.modeler.create_coordinate_system( origin=[ "{}_position_x".format(self._name), "{}_position_y".format(self._name), signal_layer.elevation.name, ], reference_cs="Global", name=line_name + "_CS", ) application.modeler.set_working_coordinate_system(line_name + "_CS") if axis == "X": start_point = [0, "-{0}_width/2".format(self._name), 0] else: start_point = ["-{0}_width/2".format(self._name), 0, 0] self._reference_system = line_name + "_CS" if signal_layer.thickness: self._aedt_object = application.modeler.create_box( position=start_point, dimensions_list=[ "{}_length".format(self._name), "{}_width".format(self._name), signal_layer.thickness.name, ], name=line_name, matname=signal_layer.material_name, ) else: self._aedt_object = application.modeler.create_rectangle( position=start_point, dimension_list=["{}_length".format(self._name), "{}_width".format(self._name)], name=line_name, matname=signal_layer.material_name, ) application.modeler.set_working_coordinate_system("Global") application.modeler.subtract(blank_list=[signal_layer.name], tool_list=[line_name], keepOriginals=True) @property def frequency(self): """Frequency. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._frequency @property def substrate_thickness(self): """Substrate Thickness. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._substrate_thickness @property def width(self): """Width. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._width @property def width_h_w(self): """Width H W. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ if self._width_h_w is not None: return self._width_h_w @property def _width_calcul(self): """Width calculation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. 
""" # if w/h < 2 : # a = z * sqrt((er + 1) / 2) / 60 + (0.23 + 0.11 / er) * (er - 1) / (er + 1) # w/h = 8 * exp(a) / (exp(2 * a) - 2) # else w/h > 2 : # b = 377 * pi / (2 * z * sqrt(er)) # w/h = 2 * (b - 1 - log(2 * b - 1) * (er - 1) * (log(b - 1) + 0.39 - 0.61 / er) / (2 * er)) / pi h = self._substrate_thickness.name z = self._charac_impedance.name er = self._permittivity.name a_formula = ( "(" + z + " * sqrt((" + er + " + 1)/2)/60 + (0.23 + 0.11/" + er + ")" + " * (" + er + "- 1)/(" + er + "+ 1))" ) w_div_by_h_inf_2 = "(8 * exp(" + a_formula + ")/(exp(2 * " + a_formula + ") - 2))" b_formula = "(377 * pi/(2 * " + z + " * " + "sqrt(" + er + ")))" w_div_by_h_sup_2 = ( "(2 * (" + b_formula + " - 1 - log(2 * " + b_formula + " - 1) * (" + er + " - 1) * (log(" + b_formula + " - 1) + 0.39 - 0.61/" + er + ")/(2 * " + er + "))/pi)" ) w_formula_inf = w_div_by_h_inf_2 + " * " + h w_formula_sup = w_div_by_h_sup_2 + " * " + h self._width_h_w = NamedVariable(self.application, self._name + "_width_h_w", w_formula_inf) self._width = NamedVariable(self.application, self._name + "_width", w_formula_sup) return self._width, self._width_h_w @property def position_x(self): """Starting Position X. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._position_x @property def position_y(self): """Starting Position Y. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._position_y @property def permittivity(self): """Permittivity. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._permittivity @property def _permittivity_calcul(self): """Permittivity Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ self._permittivity = self.application.materials[self._dielectric_material].permittivity return self._permittivity @property def added_length(self): """Added Length Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._added_length @property def _added_length_calcul(self): """Added Length Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # "0.412 * substrate_thickness * (patch_eff_permittivity + 0.3) * (patch_width/substrate_thickness + 0.264)" # " / ((patch_eff_permittivity - 0.258) * (patch_width/substrate_thickness + 0.813)) " er_e = self._effective_permittivity.name h = self._substrate_thickness.name w = self._width.name patch_added_length_formula = ( "0.412 * " + h + " * (" + er_e + " + 0.3) * (" + w + "/" + h + " + 0.264)/" "((" + er_e + " - 0.258) * (" + w + "/" + h + " + 0.813))" ) self._added_length = NamedVariable(self.application, self._name + "_added_length", patch_added_length_formula) return self._added_length @property def length(self): """Length. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._length @property def _length_calcul(self): """Length Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # "patch_wave_length / 2 - 2 * patch_added_length" d_l = self._added_length.name lbd = self._wave_length.name e_l = self._electrical_length.name line_length_formula = lbd + "* (" + e_l + "/360)" + " - 2 * " + d_l self._length = NamedVariable(self.application, self._name + "_length", line_length_formula) return self._length @property def charac_impedance(self): """Characteristic Impedance. 
Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._charac_impedance @property def _charac_impedance_calcul(self): """Characteristic Impedance Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # if w / h > 1: 60 * log(8 * h / w + w / (4 * h)) / sqrt(er_e) # if w / h < 1: 120 * pi / (sqrt(er_e) * (w / h + 1.393 + 0.667 * log(w / h + 1.444))) w = self._width.name h = self._dielectric_layer.thickness.name er_e = self.effective_permittivity.name charac_impedance_formula_w_h = ( "60 * log(8 * " + h + "/" + w + " + " + w + "/(4 * " + h + "))/sqrt(" + er_e + ")" ) charac_impedance_formula_h_w = ( "120 * pi / (sqrt(" + er_e + ") * (" + w + "/" + h + "+ 1.393 + 0.667 * log(" + w + "/" + h + " + 1.444)))" ) self._charac_impedance_w_h = NamedVariable( self.application, self._name + "_charac_impedance_w_h", charac_impedance_formula_w_h ) self._charac_impedance_h_w = NamedVariable( self.application, self._name + "_charac_impedance_h_w", charac_impedance_formula_h_w ) return self._charac_impedance_w_h, self._charac_impedance_h_w @property def effective_permittivity(self): """Effective Permittivity. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._effective_permittivity @property def _effective_permittivity_calcul(self): """Effective Permittivity Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # "(substrat_permittivity + 1)/2 + # (substrat_permittivity - 1)/(2 * sqrt(1 + 10 * substrate_thickness/patch_width))" er = self._permittivity.name h = self._substrate_thickness.name w = self._width.name patch_eff_permittivity_formula = ( "(" + er + " + 1)/2 + (" + er + " - 1)/(2 * sqrt(1 + 10 * " + h + "/" + w + "))" ) self._effective_permittivity = NamedVariable( self.application, self._name + "_eff_permittivity", patch_eff_permittivity_formula ) return self._effective_permittivity @property def wave_length(self): """Wave Length. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._wave_length @property def _wave_length_calcul(self): """Wave Length Calutation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ # "c0 * 1000/(patch_frequency * sqrt(patch_eff_permittivity))" # TODO it is currently only available for mm f = self._frequency.name er_e = self._effective_permittivity.name patch_wave_length_formula = "(c0 * 1000/(" + f + "* sqrt(" + er_e + ")))mm" self._wave_length = NamedVariable( self.application, self._name + "_wave_length", self.application.modeler._arg_with_dim(patch_wave_length_formula), ) return self._wave_length @property def electrical_length(self): """Electrical Length. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ return self._electrical_length @property def _electrical_length_calcul(self): """Electrical Length calculation. Returns ------- :class:`pyaedt.modeler.stackup_3d.NamedVariable` Variable Object. """ lbd = self._wave_length.name length = self._length.name d_l = self._added_length.name elec_length_formula = "360 * (" + length + " + 2 * " + d_l + ")/" + lbd self._electrical_length = NamedVariable(self.application, self._name + "_elec_length", elec_length_formula) return self._electrical_length @pyaedt_function_handler() def create_lumped_port(self, reference_layer_name, change_side=False): """Create a lumped port on the specified line. 
Parameters ---------- reference_layer_name : str Name of the layer on which attach the reference. change_side : bool, optional Either if apply the port on one direction or the opposite. Default it is on Positive side. Returns ------- :class:`pyaedt.modules.Boundary.BoundaryObject` Boundary object. """ if self._axis == "X": if change_side: axisdir = self.application.AxisDir.XNeg else: axisdir = self.application.AxisDir.XPos else: if change_side: axisdir = self.application.AxisDir.YNeg else: axisdir = self.application.AxisDir.YPos p1 = self.application.create_lumped_port_between_objects( reference_layer_name, self.aedt_object.name, axisdir=axisdir ) z_elev = "" start_count = False for k, v in self._signal_layer._stackup.stackup_layers.items(): if k == reference_layer_name or k == self._signal_layer.name: if not start_count: start_count = True else: start_count = False elif start_count: z_elev += "-" + v.thickness.name self.application.modeler.oeditor.ChangeProperty( [ "NAME:AllTabs", [ "NAME:Geometry3DCmdTab", ["NAME:PropServers", self._name + ":Move:1"], ["NAME:ChangedProps", ["NAME:Move Vector", "X:=", "0mm", "Y:=", "0mm", "Z:=", z_elev]], ], ] ) return p1 class Polygon(CommonObject, object): """Polygon Class in Stackup3D.""" def __init__( self, application, point_list, thickness, signal_layer_name, poly_name="poly", mat_name="copper", is_void=False, reference_system=None, ): CommonObject.__init__(self, application) self._is_void = is_void self._layer_name = signal_layer_name self._app = application pts = [] for el in point_list: pts.append( [ application.modeler._arg_with_dim(el[0]), application.modeler._arg_with_dim(el[1]), "layer_" + str(signal_layer_name) + "_position", ] ) if reference_system: application.modeler.set_working_coordinate_system(reference_system) self._reference_system = reference_system else: application.modeler.create_coordinate_system( origin=[0, 0, 0], reference_cs="Global", name=poly_name + "_CS" ) application.modeler.set_working_coordinate_system(poly_name + "_CS") self._reference_system = poly_name + "_CS" self._aedt_object = application.modeler.create_polyline( position_list=pts, name=poly_name, matname=mat_name, cover_surface=True ) if thickness: if isinstance(thickness, (float, int)): application.modeler.sweep_along_vector(self._aedt_object, [0, 0, thickness], draft_type="Natural") else: application.modeler.sweep_along_vector(self._aedt_object, [0, 0, thickness.name], draft_type="Natural") application.modeler.set_working_coordinate_system("Global") @property def points_on_layer(self): """Object Bounding Box. Returns ------- List List of [x,y] coordinate of bounding box. 
""" bb = self._aedt_object.bounding_box return [[bb[0], bb[1]], [bb[0], bb[4]], [bb[3], bb[4]], [bb[3], bb[1]]] class MachineLearningPatch(Patch, object): """MachineLearningPatch Class in Stackup3D.""" def __init__( self, application, frequency, patch_width, signal_layer, dielectric_layer, patch_position_x=0, patch_position_y=0, patch_name="patch", reference_system=None, axis="X", ): Patch.__init__( self, application, frequency, patch_width, signal_layer, dielectric_layer, patch_length=None, patch_position_x=patch_position_x, patch_position_y=patch_position_y, patch_name=patch_name, reference_system=reference_system, axis=axis, ) if not is_ironpython: try: joblib except NameError: # pragma: no cover raise ImportError("joblib package is needed to run ML.") path_file = os.path.dirname(__file__) path_folder = os.path.split(path_file)[0] training_file = os.path.join(path_folder, "misc", "patch_svr_model_100MHz_1GHz.joblib") model = joblib.load(training_file) list_for_array = [ [ self.frequency.numeric_value, self.width.numeric_value, self._permittivity.numeric_value, self.dielectric_layer.thickness.numeric_value, ] ] array_for_prediction = np.array(list_for_array, dtype=np.float32) length = model.predict(array_for_prediction)[0] self.length.expression = application.modeler._arg_with_dim(length) else: # pragma: no cover self.application.logger.warning("Machine learning algorithm aren't covered in IronPython.")
33.91604
119
0.574768
8,956
85,638
5.197298
0.058062
0.019851
0.023202
0.026941
0.638344
0.566438
0.513352
0.46843
0.441124
0.414248
0
0.010979
0.324646
85,638
2,524
120
33.929477
0.793831
0.222506
0
0.480932
0
0
0.055673
0.009202
0.000706
0
0
0.000396
0
1
0.088983
false
0.001412
0.008475
0.000706
0.184322
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5
1,511
py
Python
data/train/python/bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5urls.py
harshp8l/deep-learning-lang-detection
2a54293181c1c2b1a2b840ddee4d4d80177efb33
[ "MIT" ]
84
2017-10-25T15:49:21.000Z
2021-11-28T21:25:54.000Z
data/train/python/bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5urls.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
5
2018-03-29T11:50:46.000Z
2021-04-26T13:33:18.000Z
data/train/python/bd47557928bc51ca7d2e89e0a88949b5b7b0aaa5urls.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
24
2017-11-22T08:31:00.000Z
2022-03-27T01:22:31.000Z
from django.conf.urls.defaults import * urlpatterns = patterns('clwmail.admin.views', (r'user/manage/page/(?P<page_num>\d{1,})/$' ,'usermanage'), (r'user/manage/page/$' ,'usermanage'), (r'user/add/$' ,'useradd'), (r'user/(?P<userid>.*)/domain/(?P<domain>.*)/edit/$' ,'useredit'), (r'user/(?P<userid>.*)/domain/(?P<domain>.*)/hide/$' ,'userhide'), (r'user/(?P<userid>.*)/domain/(?P<domain>.*)/unhide/$' ,'userunhide'), (r'group/manage/$' ,'groupmanage'), (r'group/manage/page/(?P<page_num>\d{1,})/$' ,'groupmanage'), (r'group/(?P<alias>.*)/domain/(?P<domain>.*)/edit/$' ,'groupedit'), (r'group/(?P<alias>.*)/domain/(?P<domain>.*)/delete/$' ,'groupdelete'), (r'group/add/$' ,'groupadd'), (r'domain/(?P<domain_name>.*)/userget/$' ,'getaliasusers'), (r'domain/manage/$' ,'domainmanage'), (r'domain/manage/page/(?P<page_num>\d{1,})/$' ,'domainmanage'), (r'domain/(?P<domain_name>.*)/edit/$' ,'domainedit'), (r'domain/(?P<domain_name>.*)/delete/$' ,'domaindelete'), (r'domain/add/$' ,'domainadd'), (r'genpass/$' ,'genpass'), (r'' ,'usermanage'), )
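The record above targets the long-removed django.conf.urls.defaults / patterns() API (Django 1.7 and earlier). As a hedged sketch only, the first few routes might look like this on modern Django (2.0+), assuming the views are importable from clwmail.admin.views as the prefix string suggests:

from django.urls import re_path
from clwmail.admin import views  # hypothetical import path inferred from the patterns() prefix

urlpatterns = [
    re_path(r'^user/manage/page/(?P<page_num>\d{1,})/$', views.usermanage),
    re_path(r'^user/add/$', views.useradd),
    re_path(r'^$', views.usermanage),
]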
65.695652
77
0.420913
137
1,511
4.59854
0.328467
0.088889
0.165079
0.071429
0.379365
0.293651
0.293651
0
0
0
0
0.003012
0.340834
1,511
22
78
68.681818
0.629518
0
0
0
0
0
0.505625
0.309729
0
0
0
0
0
1
0
false
0.045455
0.045455
0
0.045455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd49a1d92154f5da9b36b624b1f7c5c860a48554
346
py
Python
remove_duplicates_from_sorted_array.py
lutianming/leetcode
848c7470ff5fd23608cc954be23732f60488ed8a
[ "MIT" ]
null
null
null
remove_duplicates_from_sorted_array.py
lutianming/leetcode
848c7470ff5fd23608cc954be23732f60488ed8a
[ "MIT" ]
null
null
null
remove_duplicates_from_sorted_array.py
lutianming/leetcode
848c7470ff5fd23608cc954be23732f60488ed8a
[ "MIT" ]
null
null
null
class Solution: # @param a list of integers # @return an integer def removeDuplicates(self, A): length = len(A) if length <= 1: return length index = 1 for i in range(1, length): if A[i] != A[i-1]: A[index] = A[i] index += 1 return index
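A quick usage check for the record above: the method deduplicates in place and returns the new logical length, so callers read only the first k elements.

nums = [1, 1, 2, 2, 3]
k = Solution().removeDuplicates(nums)
print(k, nums[:k])  # 3 [1, 2, 3]; elements past index k are stale leftovers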
24.714286
34
0.459538
44
346
3.613636
0.5
0.037736
0
0
0
0
0
0
0
0
0
0.025773
0.439306
346
13
35
26.615385
0.793814
0.127168
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd49d7f152ceeb7bc9bb00c813b8cb8af0d1c6dc
3,704
py
Python
visan/plot/datasetattributespanel.py
ercumentaksoy/visan
57c9257d80622fc0ab03591db48cc2155bd12f1b
[ "MIT", "BSD-3-Clause" ]
7
2020-04-09T05:21:03.000Z
2022-01-23T18:39:02.000Z
visan/plot/datasetattributespanel.py
ercumentaksoy/visan
57c9257d80622fc0ab03591db48cc2155bd12f1b
[ "MIT", "BSD-3-Clause" ]
7
2020-01-05T19:19:20.000Z
2020-05-27T09:41:49.000Z
visan/plot/datasetattributespanel.py
ercumentaksoy/visan
57c9257d80622fc0ab03591db48cc2155bd12f1b
[ "MIT", "BSD-3-Clause" ]
4
2020-04-18T14:11:22.000Z
2021-11-10T02:27:49.000Z
# Copyright (C) 2002-2021 S[&]T, The Netherlands. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import wx def _isList(obj): try: iter(obj) except Exception: return False else: try: import numpy if isinstance(obj, numpy.ndarray): return True except Exception: pass try: obj + '' except Exception: return True else: return False class DataSetAttributesPanel(wx.Panel): def __init__(self, parent): panelstyle = wx.TAB_TRAVERSAL if wx.Platform == '__WXGTK__': panelstyle |= wx.SUNKEN_BORDER wx.Panel.__init__(self, parent, -1, style=panelstyle) # Create and configure all widgets self.CreateControls() self.CreateLayout() def CreateControls(self): # Create the two column list for showing attributes self.attributeList = wx.ListCtrl(self, -1, style=(wx.LC_REPORT | wx.LC_NO_HEADER | wx.LC_VRULES), size=(100, -1)) self.attributeList.InsertColumn(0, "attribute") self.attributeList.InsertColumn(1, "value") def CreateLayout(self): sizer = wx.BoxSizer(wx.HORIZONTAL) sizer.Add(self.attributeList, 1, wx.EXPAND) self.SetSizer(sizer) def UpdateAttributes(self, attributes, keyframe): self.attributeList.DeleteAllItems() keys = sorted(attributes.keys()) for key in keys: value = attributes[key] if _isList(value): # try to see if we can use a keyframe index for the value try: value = value[keyframe] except IndexError: # if the keyframe is out of range, just use the final value value = value[-1] except Exception: pass self.attributeList.Append([key, value]) self.attributeList.SetColumnWidth(0, wx.LIST_AUTOSIZE) if wx.Platform == '__WXMSW__': self.attributeList.SetColumnWidth(0, self.attributeList.GetColumnWidth(0) + 5) self.attributeList.SetColumnWidth(1, wx.LIST_AUTOSIZE)
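A minimal harness for the panel above (assumes a display and an installed wxPython; the attribute values are illustrative). With keyframe=1, list-valued attributes show their second sample, per UpdateAttributes:

if __name__ == "__main__":
    app = wx.App(False)
    frame = wx.Frame(None, title="dataset attributes")
    panel = DataSetAttributesPanel(frame)
    panel.UpdateAttributes({"units": "K", "scale": [0.5, 1.0, 2.0]}, keyframe=1)
    frame.Show()
    app.MainLoop()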
38.583333
105
0.660907
450
3,704
5.382222
0.457778
0.07019
0.038398
0.018993
0.07597
0.056152
0.056152
0.056152
0.056152
0.056152
0
0.009605
0.269168
3,704
95
106
38.989474
0.885113
0.455994
0
0.301887
0
0
0.016145
0
0
0
0
0
0
1
0.09434
false
0.037736
0.037736
0
0.226415
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd49f05f95bdcec75ece665e2dc35ecf557cf5b9
3,473
py
Python
iscc_registry/observe.py
titusz/iscc-registry
def03f420e671ec470070bb09b6a78099f7827da
[ "MIT" ]
3
2020-07-06T16:01:54.000Z
2020-08-06T11:03:25.000Z
iscc_registry/observe.py
titusz/iscc-registry
def03f420e671ec470070bb09b6a78099f7827da
[ "MIT" ]
null
null
null
iscc_registry/observe.py
titusz/iscc-registry
def03f420e671ec470070bb09b6a78099f7827da
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Watching for registration events""" import time from dataclasses import dataclass, asdict import iscc_registry from loguru import logger as log import iscc from iscc_registry.conn import db_client from iscc_registry.publish import get_live_contract from iscc_registry import tools from iscc_registry.tools import build_iscc_id @dataclass class RegEntry: iscc: str actor: str cid: str = "" tx_hash: str = "" block_hash: str = "" block_num: int = 0 def parse_event(evt): # encode ISCC iscc_codes = [] for code_type in ("mc", "cc", "dc", "ic"): iscc_codes.append(iscc.encode(getattr(evt.args, code_type))) # encode CIDv0 cid = tools.sha256_to_cid(evt.args.cid) return RegEntry( iscc="-".join(iscc_codes), actor=evt.args.actor, cid=cid, tx_hash=evt.transactionHash.hex(), block_hash=evt.blockHash.hex(), block_num=evt.blockNumber, ) def observe(from_block=None, rebuild=False): """Watch ISCC-Registry contract events and index new registration events.""" meta_index = db_client() if rebuild: meta_index.clear() from_block = 0 if from_block is None: if "height_eth" not in meta_index: meta_index["height_eth"] = 0 from_block = meta_index["height_eth"] log.info(f"start observing from block {from_block}") co = get_live_contract() event_filter = co.events.Registration.createFilter(fromBlock=from_block) reg_entry = None log.info("observe historic registration events") for event in event_filter.get_all_entries(): reg_entry = parse_event(event) log.info(f"observing historic {reg_entry}") index(reg_entry) if reg_entry: meta_index["height_eth"] = reg_entry.block_num log.info("start watching new registration events") while True: for event in event_filter.get_new_entries(): reg_entry = parse_event(event) log.info(f"observing {reg_entry}") index(reg_entry) if reg_entry: meta_index["height_eth"] = reg_entry.block_num time.sleep(2) def index(reg_entry: RegEntry) -> str: meta_index = db_client() counter = 0 iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter) while iscc_id in meta_index: if meta_index[iscc_id]["actor"] == reg_entry.actor: log.info(f"updating {iscc_id} -> {reg_entry}") meta_index[iscc_id] = asdict(reg_entry) break counter += 1 log.info(f"counting up {iscc_id}") iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter) meta_index[iscc_id] = asdict(reg_entry) log.info(f"indexed {iscc_id} -> {reg_entry}") return iscc_id def find_next(reg_entry: RegEntry) -> str: meta_index = db_client() counter = 0 iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter) while iscc_id in meta_index: if meta_index[iscc_id]["actor"] == reg_entry.actor: log.info( f"Previously registered by same actor. This will be an update: {iscc_id} -> {reg_entry}" ) return iscc_id counter += 1 log.info(f"counting up {iscc_id}") iscc_id = build_iscc_id(iscc_registry.LEDGER_ID_ETH, reg_entry.iscc, counter) return iscc_id if __name__ == "__main__": observe()
30.734513
104
0.657357
486
3,473
4.415638
0.244856
0.089469
0.029823
0.033551
0.419385
0.419385
0.397018
0.34576
0.34576
0.34576
0
0.004919
0.238986
3,473
112
105
31.008929
0.807037
0.043478
0
0.333333
0
0
0.131157
0
0
0
0
0
0
1
0.044444
false
0
0.1
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd4c2d2d1aecd9d7ef7769f96a47de90c8225163
6,400
py
Python
src/CNN_models/train_model.py
ChrisPedder/Medieval_Manuscripts
40bfcf9c273385cfd8aa66e63b2fb80078fef33b
[ "MIT" ]
null
null
null
src/CNN_models/train_model.py
ChrisPedder/Medieval_Manuscripts
40bfcf9c273385cfd8aa66e63b2fb80078fef33b
[ "MIT" ]
5
2020-12-28T15:28:35.000Z
2022-02-10T03:26:44.000Z
src/CNN_models/train_model.py
ChrisPedder/Medieval_Manuscripts
40bfcf9c273385cfd8aa66e63b2fb80078fef33b
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Aug 10 11:07:05 2018 @author: chrispedder To train the model, run from the top-level dir as: python3 -m src.CNN_models.train_model --args ... """ import numpy as np import os import argparse import json import tensorflow as tf from abc import ABC, abstractmethod from datetime import datetime from .TFRecordsReader import TFRecordsReader from ..data.Predictors import ( predictors_options, VGG16Predictor, embedding_sizes) # Helper function for writing to JSON def jsonify(obj): if isinstance(obj, np.ndarray): return obj.tolist() elif isinstance(obj, np.float32) or isinstance(obj, np.float64): return float(obj) elif isinstance(obj, np.int32) or isinstance(obj, np.int64): return int(obj) return obj class ModelTrainer(ABC): def __init__(self, args): self.args = args self.model = self.build_model() self.datasets = self.get_train_test_datasets() self.predictor = predictors_options[args.embedding_model] @abstractmethod def build_model(self): pass @abstractmethod def create_model_training_folder(self): pass def safe_folder_create(self, folder): if not os.path.isdir(folder): os.mkdir(folder) @abstractmethod def get_train_test_datasets(self): pass @abstractmethod def write_config_to_json(self): pass @abstractmethod def train(self): pass @abstractmethod def predict(self, data): pass class DeterministicModel(ModelTrainer): def __init__(self, args): self.epochs = args.epochs self.batch_size = args.batch_size self.dropout = args.dropout self.log_dir = args.log_dir self.embed_size = embedding_sizes[args.embedding_model] self.hidden_size = args.hidden_size super().__init__(args) def create_model_training_folder(self): # Check that top level log dir exists, if not, create it self.safe_folder_create(self.log_dir) # Next-level log dir based on date, if not already present, create it now = datetime.now() date = now.strftime("%d_%m_%Y") date_dir = os.path.join(self.log_dir, date) self.safe_folder_create(date_dir) # Lowest-level log dir based on numbering, if date_dir not empty, # check what the previous highest index was, and increment by one. 
last_index = 0 if len(os.listdir(date_dir)) != 0: subfolder_list = [x[0] for x in os.walk(date_dir) if os.path.isdir(x[0])] last_index = max([int(x.split('_')[-1]) for x in subfolder_list[1:]]) model_dir = os.path.join(date_dir, 'model_' + str(last_index + 1)) self.safe_folder_create(model_dir) return model_dir def get_train_test_datasets(self): reader = TFRecordsReader(self.args) return reader.datasets def build_model(self): model = tf.keras.Sequential() model.add(tf.keras.Input(shape=(self.embed_size,))) model.add(tf.keras.layers.Dense(self.hidden_size, activation='relu')) model.add(tf.keras.layers.Dropout(self.dropout)) model.add(tf.keras.layers.Dense(1, activation='sigmoid')) return model def write_config_to_json(self): args_dict = vars(self.args) for key, value in args_dict.items(): args_dict[key] = jsonify(value) json_path = os.path.join(self.logs_folder, 'config.json') with open(json_path, 'w') as f: json.dump(args_dict, f) print(f'Config file written to {json_path}') def train(self): self.logs_folder = self.create_model_training_folder() self.model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) checkpointer = tf.keras.callbacks.ModelCheckpoint( os.path.join(self.logs_folder, 'checkpoints'), monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True) # tensorboard = tf.keras.callbacks.TensorBoard( # log_dir = os.path.join(self.logs_folder, 'tensorboard'), # histogram_freq = 1, # write_graph = True, # write_images = True) self.model.fit( x=self.datasets['train'], epochs=self.epochs, batch_size=self.batch_size, validation_data=self.datasets['test'], callbacks = [checkpointer]) # callbacks = [checkpointer, tensorboard]) self.write_config_to_json() def predict(self, data): args_copy = self.args args_copy.batch_size = 1 pred = self.predictor(args_copy) outputs = [] for entry in data: embedding = pred.predict(entry) out = self.model.predict(embedding) outputs.append(out) return outputs def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--batch_size', help='The size of the batches to use ' 'when training the models', type=int, default=32) parser.add_argument('--embedding_model', help='which embeddings to ' 'use when training the model', type=str, default='vgg16') parser.add_argument('--data_dir', help='Path to the data', type=str, required=True) parser.add_argument('--epochs', help='How many epochs to train the model ' 'for.', type=int, default=50) parser.add_argument('--dropout', help='How much dropout to apply to model ', type=float, default=0.5) parser.add_argument('--log_dir', help='Where to save model weights and ' 'config.', type=str, required=True) parser.add_argument('--hidden_size', help='What hidden sizes to use in ' 'model.', type=int, default=256) parser.add_argument('--learning_rate', help='What learning rate to use in ' 'training the model.', type=float, default=0.0001) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() model = DeterministicModel(args) model.train()
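The write_config_to_json method above relies on the jsonify helper because numpy scalars and arrays are not JSON-serializable by default; a small standalone check (values illustrative):

import json
import numpy as np

print(json.dumps({"lr": jsonify(np.float32(1e-4)), "dims": jsonify(np.arange(3))}))
# prints {"lr": 9.99...e-05, "dims": [0, 1, 2]} -- a plain float and list, no TypeError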
33.333333
85
0.621563
805
6,400
4.767702
0.28323
0.01407
0.035435
0.026055
0.159979
0.09432
0.01876
0
0
0
0
0.011164
0.272188
6,400
191
86
33.507853
0.812795
0.11125
0
0.188406
0
0
0.098288
0
0
0
0
0
0
1
0.123188
false
0.043478
0.065217
0
0.268116
0.007246
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd4e4bb56c05d5afc00c0ccb424743f1c99a0f0b
8,063
py
Python
pfb_exporter/transform/sqla.py
znatty22/pfb-edu
24e606895c192b92493c0808d00a10fdf6f5ffa4
[ "Apache-2.0" ]
null
null
null
pfb_exporter/transform/sqla.py
znatty22/pfb-edu
24e606895c192b92493c0808d00a10fdf6f5ffa4
[ "Apache-2.0" ]
null
null
null
pfb_exporter/transform/sqla.py
znatty22/pfb-edu
24e606895c192b92493c0808d00a10fdf6f5ffa4
[ "Apache-2.0" ]
null
null
null
""" Transform SQLAlchemy Models to PFB Schema """ import os import logging import inspect import subprocess from collections import defaultdict import timeit from pprint import pformat from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.inspection import inspect as sqla_inspect from sqlalchemy.orm.properties import ColumnProperty from sqlalchemy.ext.declarative.api import DeclarativeMeta from sqlalchemy.exc import NoInspectionAvailable from pfb_exporter.utils import import_module_from_file, seconds_to_hms from pfb_exporter.transform.base import Transformer SQLA_AVRO_TYPE_MAP = { 'primitive': { 'Text': 'string', 'Boolean': 'boolean', 'Float': 'float', 'Integer': 'int', 'String': 'string', 'UUID': 'string', 'DateTime': 'string', }, 'logical': { 'UUID': 'uuid', 'DateTime': None } } class SqlaTransformer(Transformer): def __init__(self, models_filepath, output_dir, db_conn_url=None): """ Constructor :param models_filepath: path to where the SQLAlchemy models are stored or will be written if they are generated :type models_filepath: str :param output_dir: path where PFB Schema will be written :type output_dir: str :param db_conn_url: Connection URL for database. Format depends on database. See SQLAlchemy documentation for supported databases """ super().__init__(models_filepath, output_dir) self.logger = logging.getLogger(type(self).__name__) self.db_conn_url = db_conn_url self.data_dict = {} self.model_dict = {} def _transform(self): """ Entry point for PFB schema generation. Called by pfb_exporter.transform.base.Transformer 1. (Optional) Generate SQLAlchemy models from database 2. Import model classes from dir or file 2. Transform SQLAlchemy models to PFB Schema """ self.logger.info('Build PFB Schema from SqlAlchemy models') if self.db_conn_url: self._generate_models() self._import_models() if not (self.db_conn_url or self.model_dict): raise RuntimeError( 'There are 0 models to generate the PFB file. 
You must ' 'provide a DB connection URL that can be used to ' 'connect to a database to generate the models or ' 'provide a dir or file path to where the models reside' ) return self._create_pfb_schema() def _generate_models(self): """ Generate SQLAlchemy models from database Uses sqlacodegen CLI to generate models See https://github.com/agronholm/sqlacodegen """ # sqlacodegen requires the models to be written to a file if os.path.isdir(self.models_filepath): self.models_filepath = os.path.join( self.models_filepath, 'models.py' ) # Generate SQLAlchemy models cmd_str = ( f'sqlacodegen {self.db_conn_url} --outfile {self.models_filepath}' ) self.logger.debug(f'Building SQLAlchemy models:\n{cmd_str}') start_time = timeit.default_timer() output = subprocess.run( cmd_str, shell=True, stdout=subprocess.PIPE ) total_time = timeit.default_timer() - start_time output.check_returncode() self.logger.debug(f'Time elapsed: {seconds_to_hms(total_time)}') def _import_models(self): """ Import the SQLAlchemy model classes from the Python modules in models_filepath """ self.logger.debug( f'Importing SQLAlchemy models from {self.models_filepath}' ) def _import_model_classes_from_file(filepath): """ Import the SQLAlchemy models from the Python module at `filepath` """ imported_model_classes = [] mod = import_module_from_file(filepath) # NOTE - We cannot use # pfb_exporter.utils.import_subclass_from_module here because # we are unable to use issubclass to test if the SQLAlchemy model # class is a subclass of its parent # (sqlalchemy.ext.declarative.api.Base) # The best we can do is make sure the class is a SQLAlchemy object # and check that the object is a DeclarativeMeta type for cls_name, cls_path in inspect.getmembers(mod, inspect.isclass): cls = getattr(mod, cls_name) try: sqla_inspect(cls) except NoInspectionAvailable: # Not a SQLAlchemy object pass else: if type(cls) == DeclarativeMeta: imported_model_classes.append(cls) return imported_model_classes if (os.path.isfile(self.models_filepath) and os.path.splitext(self.models_filepath)[-1] == '.py'): filepaths = [self.models_filepath] else: filepaths = [ os.path.join(root, fn) for root, dirs, files in os.walk(self.models_filepath) for fn in files if os.path.splitext(fn)[-1] == '.py' ] self.logger.debug( f'Found {len(filepaths)} Python modules:\n{pformat(filepaths)}' ) # Add the imported modules to a dict for fp in filepaths: classes = _import_model_classes_from_file(fp) for cls in classes: self.model_dict[cls.__name__] = cls self.logger.info( f'Imported {len(self.model_dict)} SQLAlchemy models:' f'\n{pformat(list(self.model_dict.keys()))}' ) def _create_pfb_schema(self): """ Transform SQLAlchemy models into PFB schema """ self.logger.info('Creating PFB schema from SQLAlchemy models ...') relational_model = {} for model_name, model_cls in self.model_dict.items(): self.logger.info( f'Building schema for {model_name} ...' 
) # Inspect model columns and types for p in sqla_inspect(model_cls).iterate_properties: model_schema = defaultdict(list) if not isinstance(p, ColumnProperty): continue if not hasattr(p, 'columns'): continue column_obj = p.columns[0] # Check if foreign key if column_obj.foreign_keys: fkname = column_obj.foreign_keys.pop().target_fullname model_schema['foreign_keys'].append( {'table': fkname.split('.')[0], 'name': p.key} ) # Convert SQLAlchemy column type to avro type stype = type(column_obj.type).__name__ # Get avro primitive type ptype = SQLA_AVRO_TYPE_MAP['primitive'].get(stype) if not ptype: self.logger.warning( f'⚠️ Could not find avro type for {p}, ' f'SQLAlchemy type: {stype}' ) attr_dict = {'name': p.key, 'type': ptype} # Get avro logical type if applicable ltype = SQLA_AVRO_TYPE_MAP['logical'].get(stype) if ltype: attr_dict.update({'logicalType': ltype}) # Get default value for attr # if column_obj.default: # attr_dict.update({'default': column_obj.default}) # if column_obj.nullable: # attr_dict.update({'nullable': column_obj.nullable}) model_schema['attributes'].append(attr_dict) relational_model[model_cls.__tablename__] = model_schema return relational_model
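A standalone sketch of the type-mapping step inside _create_pfb_schema above; the column name kf_id is hypothetical:

stype = "UUID"  # a SQLAlchemy type name, as produced by type(column_obj.type).__name__
attr_dict = {"name": "kf_id", "type": SQLA_AVRO_TYPE_MAP["primitive"].get(stype)}
ltype = SQLA_AVRO_TYPE_MAP["logical"].get(stype)
if ltype:
    attr_dict.update({"logicalType": ltype})
print(attr_dict)  # {'name': 'kf_id', 'type': 'string', 'logicalType': 'uuid'}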
34.60515
79
0.583902
902
8,063
5.037694
0.268293
0.043134
0.039613
0.011444
0.087808
0.029049
0
0
0
0
0
0.001493
0.335607
8,063
232
80
34.75431
0.846369
0.219273
0
0.058824
0
0
0.1556
0.027459
0
0
0
0
0
1
0.044118
false
0.007353
0.176471
0
0.25
0.007353
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
bd55c1befc97ceb37b6df37eb99994c9d21b2ba9
773
py
Python
python/206.reverse-linked-list.py
Wanger-SJTU/leetcode-solutions
eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4
[ "MIT" ]
2
2019-05-13T17:09:15.000Z
2019-09-08T15:32:42.000Z
python/206.reverse-linked-list.py
Wanger-SJTU/leetcode
eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4
[ "MIT" ]
null
null
null
python/206.reverse-linked-list.py
Wanger-SJTU/leetcode
eb7f2fb142b8a30d987c5ac8002a96ead0aa56f4
[ "MIT" ]
null
null
null
# # @lc app=leetcode id=206 lang=python3 # # [206] Reverse Linked List # # Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None class Solution: def reverseList(self, head: ListNode) -> ListNode: def iterative(head): pre,cur = None, head while cur: nxt = cur.next cur.next = pre pre = cur cur = nxt return pre def recursively(head): if not head or not head.next: return head node = recursively(head.next) head.next.next = head head.next = None return node return iterative(head)
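A round-trip check for the record above. Because the annotations on reverseList are evaluated when the class body executes, ListNode must be defined (or injected, as on the LeetCode judge) before the Solution class:

class ListNode:  # the usual LeetCode singly-linked node
    def __init__(self, x):
        self.val = x
        self.next = None

head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(3)
node = Solution().reverseList(head)
while node:
    print(node.val, end=" ")  # 3 2 1
    node = node.next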
23.424242
54
0.500647
85
773
4.505882
0.423529
0.083551
0
0
0
0
0
0
0
0
0
0.015521
0.416559
773
32
55
24.15625
0.833703
0.240621
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1f9c7aa01ba17d2af64bca27a27081040ab187d0
2,521
py
Python
tests/default_tags.py
GrAndSE/lighty-template
63834fbb2421506205745bb596ff8ac726361f2a
[ "BSD-3-Clause" ]
1
2018-05-09T19:56:15.000Z
2018-05-09T19:56:15.000Z
tests/default_tags.py
GrAndSE/lighty-template
63834fbb2421506205745bb596ff8ac726361f2a
[ "BSD-3-Clause" ]
null
null
null
tests/default_tags.py
GrAndSE/lighty-template
63834fbb2421506205745bb596ff8ac726361f2a
[ "BSD-3-Clause" ]
null
null
null
'''Module to test default template tags such as if, for, with, include, etc. ''' import unittest from lighty.templates import Template from lighty.templates.loaders import FSLoader class DefaultTagsTestCase(unittest.TestCase): """Test case for if template tag """ def assertResult(self, name, result, value): assert result == value, 'Error on tag "%s" applying to: %s' % ( name, ' '.join((str(result), 'expected', str(value)))) def testSpaceless(self): '''Test spaceless template tag''' template = Template() template.parse('''{% spaceless %} Some broken text {% endspaceless %}''') result = template({}) right = 'Some broken text' assert result == right, 'Spaceless tag error:\n%s' % ( "\n".join((result, 'expected', right))) def testSimpleWith(self): '''Test with template tag''' template = Template() template.parse('{% with user.name as name %}{{ name }}{% endwith %}') result = template({'user': {'name': 'John'}}) self.assertResult('with', result.strip(), 'John') def testSimpleIf(self): '''Test if template tag''' template = Template() template.parse('{% if a %}Foo{% endif %}') result = template({'a': 1}) self.assertResult('if', result.strip(), 'Foo') result = template({'a': 0}) self.assertResult('if', result.strip(), '') def testSimpleFor(self): '''Test for template tag''' template = Template() template.parse('{% for a in list %}{{ a }} {% endfor %}') result = template({'list': [1, 2, 3, 4, 5]}) self.assertResult('for', result.strip(), '1 2 3 4 5') def testSimpleInclude(self): '''Test include template tag''' template = Template('{% include "simple.html" %}', name="test.html", loader=FSLoader(['tests/templates'])) result = template({'name': 'Peter'}) self.assertResult('include', result.strip(), 'Hello, Peter') def test(): suite = unittest.TestSuite() suite.addTest(DefaultTagsTestCase('testSpaceless')) suite.addTest(DefaultTagsTestCase('testSimpleWith')) suite.addTest(DefaultTagsTestCase('testSimpleIf')) suite.addTest(DefaultTagsTestCase('testSimpleFor')) suite.addTest(DefaultTagsTestCase('testSimpleInclude')) return suite
36.536232
79
0.568029
249
2,521
5.751004
0.313253
0.100559
0.066341
0.094274
0.159218
0.111732
0
0
0
0
0
0.006572
0.275684
2,521
68
80
37.073529
0.777656
0.09044
0
0.083333
0
0
0.238032
0
0
0
0
0
0.166667
1
0.145833
false
0
0.0625
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1f9c9104d3d243f4e10cfdbb1fb0326c74424885
3,038
py
Python
tests/test_calibration.py
SoyGema/NannyML
323ff404e0e06c479b01d2a63c1c3af9680d95ab
[ "Apache-2.0" ]
null
null
null
tests/test_calibration.py
SoyGema/NannyML
323ff404e0e06c479b01d2a63c1c3af9680d95ab
[ "Apache-2.0" ]
null
null
null
tests/test_calibration.py
SoyGema/NannyML
323ff404e0e06c479b01d2a63c1c3af9680d95ab
[ "Apache-2.0" ]
null
null
null
# Author: Niels Nuyttens <niels@nannyml.com> # # License: Apache Software License 2.0 """Unit tests for the calibration module.""" import numpy as np import pandas as pd import pytest from nannyml.calibration import IsotonicCalibrator, _get_bin_index_edges, needs_calibration from nannyml.exceptions import InvalidArgumentsException @pytest.mark.parametrize('vector_size,bin_count', [(0, 0), (0, 1), (1, 1), (2, 1), (3, 5)]) def test_get_bin_edges_raises_invalid_arguments_exception_when_given_too_few_samples( # noqa: D103 vector_size, bin_count ): with pytest.raises(InvalidArgumentsException): _ = _get_bin_index_edges(vector_size, bin_count) @pytest.mark.parametrize( 'vector_length,bin_count,edges', [ (20, 4, [(0, 5), (5, 10), (10, 15), (15, 20)]), (10, 3, [(0, 3), (3, 6), (6, 10)]), ], ) def test_get_bin_edges_works_correctly(vector_length, bin_count, edges): # noqa: D103 sut = _get_bin_index_edges(vector_length, bin_count) assert len(sut) == len(edges) assert sorted(sut) == sorted(edges) def test_needs_calibration_returns_false_when_calibration_does_not_always_improves_ece(): # noqa: D103 y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) y_pred_proba = y_true shuffled_indexes = np.random.permutation(len(y_true)) y_true, y_pred_proba = y_true[shuffled_indexes], y_pred_proba[shuffled_indexes] sut = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator(), bin_count=2, split_count=3) assert not sut def test_needs_calibration_returns_true_when_calibration_always_improves_ece(): # noqa: D103 y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) y_pred_proba = abs(1 - y_true) shuffled_indexes = np.random.permutation(len(y_true)) y_true, y_pred_proba = y_true[shuffled_indexes], y_pred_proba[shuffled_indexes] sut = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator()) assert sut def test_needs_calibration_raises_invalid_args_exception_when_y_true_contains_nan(): # noqa: D103 y_true = pd.Series([0, 0, 0, 0, 0, np.NaN, 1, 1, 1, 1, 1, 1]) y_pred_proba = np.asarray([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) with pytest.raises(InvalidArgumentsException, match='target values contain NaN.'): _ = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator()) def test_needs_calibration_raises_invalid_args_exception_when_y_pred_proba_contains_nan(): # noqa: D103 y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) y_pred_proba = pd.Series(np.asarray([0, 0, 0, np.NaN, 0, 0, 1, 1, 1, 1, 1, 1])) with pytest.raises(InvalidArgumentsException, match='predicted probabilities contain NaN.'): _ = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator()) def test_needs_calibration_returns_false_when_roc_auc_score_equals_one(): # noqa: D103 y_true = pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) y_pred_proba = y_true sut = needs_calibration(y_true, y_pred_proba, IsotonicCalibrator()) assert sut is False
41.616438
104
0.71264
480
3,038
4.18125
0.208333
0.036871
0.043348
0.041854
0.63727
0.540608
0.537618
0.510214
0.510214
0.502242
0
0.056559
0.161949
3,038
72
105
42.194444
0.731736
0.066162
0
0.27451
0
0
0.039688
0.017718
0
0
0
0
0.098039
1
0.137255
false
0
0.098039
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1f9d448358740aaa0c055882926c57c97ff59db8
3,962
py
Python
code/utils.py
liudaizong/IA-Net
f19295d13d1468eb582521131cde3de83dfd18f6
[ "MIT" ]
4
2021-11-02T10:57:12.000Z
2022-02-13T17:53:03.000Z
code/utils.py
liudaizong/IA-Net
f19295d13d1468eb582521131cde3de83dfd18f6
[ "MIT" ]
null
null
null
code/utils.py
liudaizong/IA-Net
f19295d13d1468eb582521131cde3de83dfd18f6
[ "MIT" ]
null
null
null
import copy import nltk import json from gensim.models import KeyedVectors import h5py import numpy as np from torch import nn def clones(module, N): return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) def load_feature(filename, dataset='ActivityNet'): if dataset == 'ActivityNet': with h5py.File(filename, 'r') as fr: return np.asarray(fr['feature']).astype(np.float32) elif dataset == 'TACOS': return np.load(filename).astype(np.float32) elif dataset == 'Charades': return np.load(filename).astype(np.float32) elif dataset == 'Didemo': with h5py.File(filename, 'r') as fr: return np.asarray(fr['feature']).astype(np.float32) return None def load_json(filename): with open(filename, encoding='utf8') as fr: return json.load(fr) def load_word2vec(filename, binary=True): word2vec = KeyedVectors.load_word2vec_format(filename, binary=binary) return word2vec def tokenize(sentence, word2vec): punctuations = ['.', '?', ',', '', '(', ')'] raw_text = sentence.lower() words = nltk.word_tokenize(raw_text) words = [word for word in words if word not in punctuations] return [word for word in words if word in word2vec] def generate_anchors(dataset='ActivityNet'): if dataset == 'ActivityNet': widths = np.array([16, 32, 64, 96, 128, 160, 192]) center = 7.5 start = center - 0.5 * (widths - 1) end = center + 0.5 * (widths - 1) elif dataset == 'TACOS': widths = np.array([8, 16, 32, 64])#np.array([6, 18, 32]) center = 7.5 start = center - 0.125 * (widths - 1) end = center + 0.125 * (widths - 1) elif dataset == 'Didemo': widths = np.array([8, 16, 32, 64])#np.array([6, 18, 32]) center = 7.5 start = center - 0.125 * (widths - 1) end = center + 0.125 * (widths - 1) elif dataset == 'Charades': widths = np.array([16, 24, 32, 40])#np.array([6, 18, 32]) center = 7.5 start = center - 0.125 * (widths - 1) end = center + 0.125 * (widths - 1) else: return None return np.stack([start, end], -1) import time class CountMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = np.zeros([2, 4],dtype=np.float32) self.count = 0 def update(self, val, n=1): self.val += val self.count += n class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count class TimeMeter(object): """Computes the average occurrence of some event per second""" def __init__(self, init=0): self.reset(init) def reset(self, init=0): self.init = init self.start = time.time() self.n = 0 def update(self, val=1): self.n += val @property def avg(self): return self.n / self.elapsed_time @property def elapsed_time(self): return self.init + (time.time() - self.start) class StopwatchMeter(object): """Computes the sum/avg duration of some event in seconds""" def __init__(self): self.reset() def start(self): self.start_time = time.time() def stop(self, n=1): if self.start_time is not None: delta = time.time() - self.start_time self.sum += delta self.n += n self.start_time = None def reset(self): self.sum = 0 self.n = 0 self.start_time = None @property def avg(self): return self.sum / self.n
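A shape check for generate_anchors above; each row is a (start, end) pair around center 7.5:

anchors = generate_anchors('ActivityNet')
print(anchors.shape)  # (7, 2) -- one row per width in [16, 32, 64, 96, 128, 160, 192]
print(anchors[0])     # [ 0. 15.] -- the width-16 anchor: 7.5 -/+ 0.5 * (16 - 1)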
25.397436
73
0.579253
537
3,962
4.212291
0.22905
0.024757
0.026525
0.04244
0.443413
0.385942
0.342175
0.320955
0.320955
0.280283
0
0.047805
0.287229
3,962
155
74
25.56129
0.753187
0.069409
0
0.477876
0
0
0.029203
0
0
0
0
0
0
1
0.19469
false
0
0.070796
0.035398
0.424779
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1f9e4501c0a3ac77cc15f6de9e5e460d7fd997df
2,654
py
Python
aps_purchasing/tests/forms_tests.py
bitmazk/django-aps-purchasing
ff0316f0eaff5bd39ae40aaa861543d125f33dae
[ "MIT" ]
4
2015-05-18T13:51:16.000Z
2015-05-18T14:47:32.000Z
aps_purchasing/tests/forms_tests.py
bitmazk/django-aps-purchasing
ff0316f0eaff5bd39ae40aaa861543d125f33dae
[ "MIT" ]
null
null
null
aps_purchasing/tests/forms_tests.py
bitmazk/django-aps-purchasing
ff0316f0eaff5bd39ae40aaa861543d125f33dae
[ "MIT" ]
null
null
null
"""Tests for the forms of the ``aps_purchasing`` app.""" import os from django.conf import settings from django.core.files.uploadedfile import SimpleUploadedFile from django.test import TestCase from django.utils.timezone import now from ..forms import QuotationUploadForm from ..models import MPN, Price, Quotation, QuotationItem from .factories import ( CurrencyFactory, DistributorFactory, ManufacturerFactory, ) class QuotationUploadFormTestCase(TestCase): """Tests for the ``QuotationUpoadForm`` form class.""" longMessage = True def setUp(self): self.distributor = DistributorFactory() self.quotation_file = open(os.path.join( settings.APP_ROOT, 'tests/files/Quotation.csv')) self.data = { 'distributor': self.distributor.pk, 'ref_number': 'REF123', 'issuance_date': now(), 'expiry_date': now(), 'is_completed': True, } self.files = { 'quotation_file': SimpleUploadedFile('Quotation.csv', self.quotation_file.read()), } def test_form(self): form = QuotationUploadForm(data=self.data) self.assertFalse(form.is_valid(), msg='The form should not be valid.') form = QuotationUploadForm(data=self.data, files=self.files) self.assertFalse(form.is_valid(), msg=( 'Without all the currencies in the DB, the form should not be' ' valid.')) self.usd = CurrencyFactory(iso_code='USD') form = QuotationUploadForm(data=self.data, files=self.files) self.assertFalse(form.is_valid(), msg=( 'Without all the manufacturers in the DB, the form should not be' ' valid.')) ManufacturerFactory(name='Samsung') ManufacturerFactory(name='TDK') form = QuotationUploadForm(data=self.data, files=self.files) self.assertTrue(form.is_valid(), msg=( 'The form should be valid. Errors: {0}'.format(form.errors))) form.save() self.assertEqual(Quotation.objects.count(), 1, msg=( 'After form save, there should be one Quotation in the database.')) self.assertEqual(QuotationItem.objects.count(), 2, msg=( 'After form save, there should be four QuotationItems in the' ' database.')) self.assertEqual(Price.objects.count(), 4, msg=( 'Afte form save, there should be three Prices in the database.')) self.assertEqual(MPN.objects.count(), 2, msg=( 'Afte form save, there should be four new MPNs in the database.'))
37.380282
79
0.629239
297
2,654
5.572391
0.3367
0.032628
0.065257
0.074924
0.371601
0.299698
0.279758
0.178248
0.178248
0.10997
0
0.004063
0.258101
2,654
70
80
37.914286
0.836465
0.037302
0
0.125
0
0
0.230346
0.009827
0
0
0
0
0.142857
1
0.035714
false
0
0.142857
0
0.214286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1f9eb8e2438d5e8851abb15909ddab5b70595c79
1,839
py
Python
test/test_read_embark_fields_json_file.py
ndlib/mellon-search
30f7eb267e35d77ee6d126789866d44d825c3e0c
[ "Apache-2.0" ]
null
null
null
test/test_read_embark_fields_json_file.py
ndlib/mellon-search
30f7eb267e35d77ee6d126789866d44d825c3e0c
[ "Apache-2.0" ]
null
null
null
test/test_read_embark_fields_json_file.py
ndlib/mellon-search
30f7eb267e35d77ee6d126789866d44d825c3e0c
[ "Apache-2.0" ]
null
null
null
# test_read_embark_fields_json_file.py 2/18/19 sm """ test read_embark_fields_json_file.py """ import json import unittest # add parent directory to path import os import inspect import sys CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile( inspect.currentframe()))) PARENTDIR = os.path.dirname(CURRENTDIR) sys.path.insert(0, PARENTDIR) from read_embark_fields_json_file import read_embark_fields_json_file class Test(unittest.TestCase): """ Class for test fixtures """ def test_read_embark_fields_json_file(self): """ run all tests in this module """ filename = PARENTDIR + "/EmbArkXMLFields.json" resulting_json = read_embark_fields_json_file(filename) with open(filename, 'r') as input_source: local_json = json.load(input_source) input_source.close() self.assertTrue(local_json == resulting_json) def test_missing_embark_field_definitions_file(self): """ test for missing field definitions file """ self.assertRaises(FileNotFoundError, read_embark_fields_json_file, "./EmbArkXMLFields.jsonx") def test_invalid_embark_field_definitions_file(self): """ test for missing field definitions file """ self.assertRaises(json.decoder.JSONDecodeError, read_embark_fields_json_file, "./InvalidEmbArkXMLFields.json") def test_embark_field_definitions_file_missing_field(self): """ test for missing field definitions file """ self.assertRaises(ValueError, read_embark_fields_json_file, "./EmbArkXMLFieldsMissingField.json") def suite(): """ define test suite """ return unittest.TestLoader().loadTestsFromTestCase(Test) if __name__ == '__main__': suite() unittest.main()
32.839286
74
0.694943
213
1,839
5.676056
0.352113
0.074442
0.119107
0.148883
0.3689
0.249793
0.226634
0.177006
0.177006
0.132341
0
0.004152
0.214247
1,839
55
75
33.436364
0.832526
0.169657
0
0
0
0
0.07822
0.072151
0
0
0
0
0.121212
1
0.151515
false
0
0.181818
0
0.393939
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fa0d3c9b6fdeba10b20b2a6b065d708f3d43858
8,928
py
Python
menu/show_results.py
Jcollier722/PageRemoval
ec14cd3927bbb754883a6a3dcff312ba90cd45db
[ "Apache-2.0" ]
null
null
null
menu/show_results.py
Jcollier722/PageRemoval
ec14cd3927bbb754883a6a3dcff312ba90cd45db
[ "Apache-2.0" ]
null
null
null
menu/show_results.py
Jcollier722/PageRemoval
ec14cd3927bbb754883a6a3dcff312ba90cd45db
[ "Apache-2.0" ]
null
null
null
"""This file is the results window""" import sys sys.path.insert(0, 'menu/') sys.path.insert(1, 'util/') sys.path.insert(2, 'sim/') import tkinter as tk import menu import import_jobs as ij import validate_jobs as validate import show_results as sr import export_results as xr import compare_sim import const import simulation from tkinter import ttk from tkinter.filedialog import askopenfile from tkinter.filedialog import asksaveasfile def make_results(self): #if job list is too long, just export to spreadsheet and show comparison if(len(self.job_list)>11): tk.messagebox.showwarning('Warning','Your job list is large and will be exported to a spreadsheet instead. Please select a save location.') files = [('Spreadsheet','.xlsx')] path = asksaveasfile(filetypes = files, defaultextension = files) xr.export(path,self.fifo_events,self.fifo_inter,self.lru_events,self.lru_inter,self.job_list) tk.messagebox.showinfo('Saved','Spreadsheet generated successfully') compare_sim.compare(['FIFO','LRU'],[self.fifo_num_inter,self.lru_num_inter]) return self.count = self.count + 1 self.window=tk.Toplevel(self) self.window.geometry("825x900") self.window.config(bg='#bfd7ff') self.window.resizable(width=False, height=False) root = self.window menu = tk.Canvas(root,width=815,height=const.MAX_HEIGHT/8,bg=const.BLUE,bd=2) menu.config(highlightbackground='black') menu.place(relx=0) #Title title = tk.Label(menu,text=const.RESULT_TITLE,font='arial 30 bold ',bg=const.BLUE).place(relx=.5,rely=0.40,anchor="center") fifo_view = tk.Button(menu,text=const.FIFO_TITLE,font='arial 12 bold',height=1,width=10,bg=const.GREEN,command=self.show_fifo).place(relx=0.3,rely=0.75,anchor="w") lru_view = tk.Button(menu,text=const.LRU_TITLE,font='arial 12 bold',height=1,width=10,bg=const.GREEN,command=self.show_lru).place(relx=0.58,rely=0.75,anchor="w") compare = tk.Button(root,text="Compare Algorithms",font='arial 12 bold',height=3,width=30,bg=const.GREEN,command=self.compare_sim).place(relx=0.3,rely=0.95,anchor="w") #**********************************************************************************************************************************************fifo frame self.fifo = tk.Canvas(root,width=815,height=const.MAX_HEIGHT/1,bg=const.BLUE,bd=2) fifo = self.fifo fifo.config(highlightbackground='black') fifo.place(relx=0,rely=.10) #fifo title title = tk.Label(fifo,text=const.FIFO_TITLE,font='arial 20 bold underline',bg=const.BLUE).place(relx=0.01,rely=0.10,anchor="w") fifo_y = const.START_Y+.10 #print each page frame for i in range(self.page_frame_count): this_text = "Page Frame "+str(i+1) this_label = tk.Label(fifo,text=this_text,font= "arial 15 bold",borderwidth=3,relief='groove',pady=7,padx=10) this_label.place(relx=0.01,rely=fifo_y) fifo_y = fifo_y + 0.07 """ Lots of magic numbers here, will move to const.py if time allows for this assignment. 
""" #print the jobs each page frame has at each moment y_fifo_jobs = const.START_Y+.10 x_fifo_jobs = self.x+.17 for i in range(self.page_frame_count): for event_list in self.fifo_events: if(str(event_list.frame) == str(i+1)): for e in event_list.event: if e is None: e="-" tk.Label(fifo,text=str(e),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs) x_fifo_jobs = x_fifo_jobs + .07 y_fifo_jobs = y_fifo_jobs +0.07 x_fifo_jobs = self.x+.17 #move jobs to right of labels x_fifo_jobs = self.x+.17 y_fifo_jobs = y_fifo_jobs +0.05 tk.Label(fifo,text=const.REQ,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=0.01,rely=y_fifo_jobs) for job in self.job_list: tk.Label(fifo,text=str(job),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs) x_fifo_jobs = x_fifo_jobs + .07 x_fifo_jobs = self.x+.17 y_fifo_jobs=y_fifo_jobs +0.07 tk.Label(fifo,text=const.INTER,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=40).place(relx=0.01,rely=y_fifo_jobs) for inter in self.fifo_inter: tk.Label(fifo,text=str(inter),font= "arial 13 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs) x_fifo_jobs = x_fifo_jobs + .07 y_fifo_jobs=y_fifo_jobs +0.07 x_fifo_jobs = self.x+.17 tk.Label(fifo,text=const.TIME,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=15).place(relx=0.01,rely=y_fifo_jobs) for i in range(len(self.job_list)): tk.Label(fifo,text=str(i+1),font= "arial 11 ",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_fifo_jobs,rely=y_fifo_jobs) x_fifo_jobs = x_fifo_jobs + .07 y_fifo_jobs=y_fifo_jobs +0.07 y_fifo_jobs=y_fifo_jobs +0.07 num_inter = str((self.fifo_num_inter)) num_req = str(len(self.job_list)) fifo_fail = str(self.fifo_fail*100)+"%" results = "Total Interrupts: "+num_inter+"\n"+"Total Requests: "+ num_req + "\n" + "Failure Rate: "+fifo_fail tk.Label(fifo,text=results,font= "arial 15 bold ").place(relx=0.01,rely=y_fifo_jobs) #**********************************************************************************************************************************************lru frame self.lru = tk.Canvas(root,width=815,height=const.MAX_HEIGHT/1,bg=const.BLUE,bd=2) lru = self.lru lru.config(highlightbackground='black') #lru.place(relx=0,rely=.10) #lru title title = tk.Label(lru,text=const.LRU_TITLE,font='arial 20 bold underline',bg=const.BLUE).place(relx=0.01,rely=0.10,anchor="w") lru_y = const.START_Y+.10 #print each page frame for i in range(self.page_frame_count): this_text = "Page Frame "+str(i+1) this_label = tk.Label(lru,text=this_text,font= "arial 15 bold",borderwidth=3,relief='groove',pady=7,padx=10) this_label.place(relx=0.01,rely=lru_y) lru_y = lru_y + 0.07 """ Lots of magic numbers here, will move to const.py if time allows for this assignment. 
""" #print the jobs each page frame has at each moment y_lru_jobs = const.START_Y+.10 x_lru_jobs = self.x+.17 for i in range(self.page_frame_count): for event_list in self.lru_events: if(str(event_list.frame) == str(i+1)): for e in event_list.event: if e is None: e="-" tk.Label(lru,text=str(e),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs) x_lru_jobs = x_lru_jobs + .07 y_lru_jobs = y_lru_jobs +0.07 x_lru_jobs = self.x+.17 #move jobs to right of labels x_lru_jobs = self.x+.17 y_lru_jobs = y_lru_jobs +0.05 tk.Label(lru,text=const.REQ,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=0.01,rely=y_lru_jobs) for job in self.job_list: tk.Label(lru,text=str(job),font= "arial 10 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs) x_lru_jobs = x_lru_jobs + .07 x_lru_jobs = self.x+.17 y_lru_jobs=y_lru_jobs +0.07 tk.Label(lru,text=const.INTER,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=40).place(relx=0.01,rely=y_lru_jobs) for inter in self.lru_inter: tk.Label(lru,text=str(inter),font= "arial 13 bold",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs) x_lru_jobs = x_lru_jobs + .07 y_lru_jobs=y_lru_jobs +0.07 x_lru_jobs = self.x+.17 tk.Label(lru,text=const.TIME,font= "arial 12 bold",borderwidth=3,relief='groove',pady=7,padx=15).place(relx=0.01,rely=y_lru_jobs) for i in range(len(self.job_list)): tk.Label(lru,text=str(i+1),font= "arial 11 ",borderwidth=3,relief='groove',pady=7,padx=10).place(relx=x_lru_jobs,rely=y_lru_jobs) x_lru_jobs = x_lru_jobs + .07 y_lru_jobs=y_lru_jobs +0.07 y_lru_jobs=y_lru_jobs +0.07 num_inter = str((self.lru_num_inter)) num_req = str(len(self.job_list)) lru_fail = str(self.lru_fail*100)+"%" results = "Total Interrupts: "+num_inter+"\n"+"Total Requests: "+ num_req + "\n" + "Failure Rate: "+lru_fail tk.Label(lru,text=results,font= "arial 15 bold ").place(relx=0.01,rely=y_lru_jobs)
46.020619
172
0.635529
1,451
8,928
3.742247
0.121985
0.055985
0.034807
0.070718
0.725599
0.678269
0.646777
0.641252
0.635175
0.591529
0
0.044128
0.197917
8,928
193
173
46.259067
0.714146
0.073029
0
0.360902
0
0
0.100979
0
0.015038
0
0
0
0
1
0.007519
false
0
0.097744
0
0.112782
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fa40e3d5ffd5031f4b30a989255c4474dd77b5f
9,440
py
Python
POP909-Dataset-master/data_process/processor.py
agurdins/RTU_Bachelor
28ed4bf90a8ffdb2b599e549bae5f2b12a795ff1
[ "Apache-2.0" ]
140
2020-08-06T12:15:56.000Z
2022-03-26T11:02:36.000Z
POP909-Dataset-master/data_process/processor.py
agurdins/RTU_Bachelor
28ed4bf90a8ffdb2b599e549bae5f2b12a795ff1
[ "Apache-2.0" ]
5
2020-08-18T08:29:46.000Z
2021-09-25T16:56:49.000Z
POP909-Dataset-master/data_process/processor.py
agurdins/RTU_Bachelor
28ed4bf90a8ffdb2b599e549bae5f2b12a795ff1
[ "Apache-2.0" ]
18
2020-09-21T07:13:44.000Z
2022-03-19T14:30:09.000Z
""" Representation Processor ============ These are core classes of representation processor. Repr Processor: the basic representation processor - Event Processor """ import numpy as np from abc import ABC, abstractmethod import pretty_midi as pyd class ReprProcessor(ABC): """Abstract base class severing as the representation processor. It provides the following abstract methods. - encode(self, note_seq): encode the note sequence into the representation sequence. - decode(self, repr_seq): decode the representation sequence into the note sequence. Notes ----- The base representation processor class includes the convertion between the note sequence and the representation sequence. In general, we assume the input note sequence has already been quantized. In that, the smallest unit of the quantization is actually 1 tick no matter what resolution is. If you init "min_step" to be larger than 1, we assume you wish to compress all the base tick. e.g. min_step = 2, then the whole ticks will be convertd half. If you do this, the representation convertion may not be 100% correct. ----- """ def __init__(self, min_step: int = 1): self.min_step = min_step def _compress(self, note_seq=None): """Return the compressed note_seq based on the min_step > 1. Parameters ---------- note_seq : Note Array. ---------- WARNING: If you do this, the representation convertion may not be 100% correct. """ new_note_seq = [ Note( start=int(d.start / self.min_step), end=int(d.end / self.min_step), pitch=d.pitch, velocity=d.velocity, ) for d in note_seq ] return new_note_seq def _expand(self, note_seq=None): """Return the expanded note_seq based on the min_step > 1. Parameters ---------- note_seq : Note Array. ---------- WARNING: If you do this, the representation convertion may not be 100% correct. """ new_note_seq = [ Note( start=int(d.start * self.min_step), end=int(d.end * self.min_step), pitch=d.pitch, velocity=d.velocity, ) for d in note_seq ] return new_note_seq @abstractmethod def encode(self, note_seq=None): """encode the note sequence into the representation sequence. Parameters ---------- note_seq= the input {Note} sequence Returns ---------- repr_seq: the representation numpy sequence """ @abstractmethod def decode(self, repr_seq=None): """decode the representation sequence into the note sequence. Parameters ---------- repr_seq: the representation numpy sequence Returns ---------- note_seq= the input {Note} sequence """ class MidiEventProcessor(ReprProcessor): """Midi Event Representation Processor. Representation Format: ----- Size: L * D: - L for the sequence (event) length - D = 1 { 0-127: note-on event, 128-255: note-off event, 256-355(default): tick-shift event 256 for one tick, 355 for 100 ticks the maximum number of tick-shift can be specified 356-388 (default): velocity event the maximum number of quantized velocity can be specified } Parameters: ----- min_step(optional): minimum quantification step decide how many ticks to be the basic unit (default = 1) tick_dim(optional): tick-shift event dimensions the maximum number of tick-shift (default = 100) velocity_dim(optional): velocity event dimensions the maximum number of quantized velocity (default = 32, max = 128) e.g. 
[C5 - - - E5 - - / G5 - - / /] -> [380, 60, 259, 188, 64, 258, 192, 256, 67, 258, 195, 257] """ def __init__(self, **kwargs): self.name = "midievent" min_step = 1 if "min_step" in kwargs: min_step = kwargs["min_step"] super(MidiEventProcessor, self).__init__(min_step) self.tick_dim = 100 self.velocity_dim = 32 if "tick_dim" in kwargs: self.tick_dim = kwargs["tick_dim"] if "velocity_dim" in kwargs: self.velocity_dim = kwargs["velocity_dim"] if self.velocity_dim > 128: raise ValueError( "velocity_dim cannot be larger than 128", self.velocity_dim ) self.max_vocab = 256 + self.tick_dim + self.velocity_dim self.start_index = { "note_on": 0, "note_off": 128, "time_shift": 256, "velocity": 256 + self.tick_dim, } def encode(self, note_seq=None): """Return the note token Parameters ---------- note_seq : Note List. Returns ---------- repr_seq: Representation List """ if note_seq is None: return [] if self.min_step > 1: note_seq = self._compress(note_seq) notes = note_seq events = [] meta_events = [] for note in notes: token_on = { "name": "note_on", "time": note.start, "pitch": note.pitch, "vel": note.velocity, } token_off = { "name": "note_off", "time": note.end, "pitch": note.pitch, "vel": None, } meta_events.extend([token_on, token_off]) meta_events.sort(key=lambda x: x["pitch"]) meta_events.sort(key=lambda x: x["time"]) time_shift = 0 cur_vel = 0 for me in meta_events: duration = int((me["time"] - time_shift) * 100) while duration >= self.tick_dim: events.append( self.start_index["time_shift"] + self.tick_dim - 1 ) duration -= self.tick_dim if duration > 0: events.append(self.start_index["time_shift"] + duration - 1) if me["vel"] is not None: if cur_vel != me["vel"]: cur_vel = me["vel"] events.append( self.start_index["velocity"] + int(round(me["vel"] * self.velocity_dim / 128)) ) events.append(self.start_index[me["name"]] + me["pitch"]) time_shift = me["time"] return events def decode(self, repr_seq=None): """Return the note seq Parameters ---------- repr_seq: Representation Sequence List Returns ---------- note_seq : Note List. """ if repr_seq is None: return [] time_shift = 0.0 cur_vel = 0 meta_events = [] note_on_dict = {} notes = [] for e in repr_seq: if self.start_index["note_on"] <= e < self.start_index["note_off"]: token_on = { "name": "note_on", "time": time_shift, "pitch": e, "vel": cur_vel, } meta_events.append(token_on) if ( self.start_index["note_off"] <= e < self.start_index["time_shift"] ): token_off = { "name": "note_off", "time": time_shift, "pitch": e - self.start_index["note_off"], "vel": cur_vel, } meta_events.append(token_off) if ( self.start_index["time_shift"] <= e < self.start_index["velocity"] ): time_shift += (e - self.start_index["time_shift"] + 1) * 0.01 if self.start_index["velocity"] <= e < self.max_vocab: cur_vel = int(round( (e - self.start_index["velocity"]) * 128 / self.velocity_dim) ) skip_notes = [] for me in meta_events: if me["name"] == "note_on": note_on_dict[me["pitch"]] = me elif me["name"] == "note_off": try: token_on = note_on_dict[me["pitch"]] token_off = me if token_on["time"] == token_off["time"]: continue notes.append( pyd.Note( velocity=token_on["vel"], pitch=int(token_on["pitch"]), start=token_on["time"], end=token_off["time"], ) ) except KeyError: skip_notes.append(me) notes.sort(key=lambda x: x.start) if self.min_step > 1: notes = self._expand(notes) return notes
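A round-trip sketch for MidiEventProcessor above, using two pretty_midi notes; times are quantized to 10 ms shift events, so the decoded notes match up to that resolution:

proc = MidiEventProcessor()
src = [pyd.Note(velocity=100, pitch=60, start=0.0, end=0.5),
       pyd.Note(velocity=100, pitch=64, start=0.25, end=0.75)]
tokens = proc.encode(src)
print(tokens[:3])  # [381, 60, 280]: velocity bin 25, note-on for pitch 60, 25-tick shift
out = proc.decode(tokens)
print([(n.pitch, round(n.start, 2), round(n.end, 2)) for n in out])
# [(60, 0.0, 0.5), (64, 0.25, 0.75)]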
30.550162
126
0.500953
1,033
9,440
4.408519
0.181026
0.036891
0.046113
0.019763
0.410408
0.347826
0.197189
0.157664
0.113746
0.113746
0
0.024527
0.395339
9,440
308
127
30.649351
0.773301
0.310699
0
0.301205
0
0
0.073663
0
0
0
0
0
0
1
0.048193
false
0
0.018072
0
0.114458
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fa5b81e8ddb69f6e5c8f48345327239689cae22
19,461
py
Python
xtb_trading.py
lemassykoi/XTBApi
3b159f0b711e0d445a9cd7fec5c7a499cc623140
[ "MIT" ]
null
null
null
xtb_trading.py
lemassykoi/XTBApi
3b159f0b711e0d445a9cd7fec5c7a499cc623140
[ "MIT" ]
null
null
null
xtb_trading.py
lemassykoi/XTBApi
3b159f0b711e0d445a9cd7fec5c7a499cc623140
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# Adaptation of the FXCM script for XTB

debug = 1  ## DEBUG ENABLED OR DISABLED

from XTBApi.api import *
import time
import pandas as pd
import datetime as dt
import talib.abstract as ta

## Math modules
import pyti.bollinger_bands as bb
from pyti.relative_strength_index import relative_strength_index as rsi
from pyti.bollinger_bands import upper_bollinger_band as ubb
from pyti.bollinger_bands import middle_bollinger_band as mbb
from pyti.bollinger_bands import lower_bollinger_band as lbb
from pyti.bollinger_bands import percent_bandwidth as percent_b

import requests
import sys, traceback
from os import system
from pprint import pprint

##
## SPINNER FUNC
##
import threading
import itertools


class Spinner:
    def __init__(self, message, delay=0.05):
        #self.spinner = itertools.cycle(['-', '/', '|', '\\'])  # counter-clockwise
        self.spinner = itertools.cycle(['-', '\\', '|', '/'])   # clockwise
        self.delay = delay
        self.busy = False
        self.spinner_visible = False
        sys.stdout.write(message)

    def write_next(self):
        with self._screen_lock:
            if not self.spinner_visible:
                sys.stdout.write(next(self.spinner))
                self.spinner_visible = True
                sys.stdout.flush()

    def remove_spinner(self, cleanup=False):
        with self._screen_lock:
            if self.spinner_visible:
                sys.stdout.write('\b')
                self.spinner_visible = False
                if cleanup:
                    sys.stdout.write(' ')   # overwrite spinner with blank
                    sys.stdout.write('\r')  # move to next line
                sys.stdout.flush()

    def spinner_task(self):
        while self.busy:
            self.write_next()
            time.sleep(self.delay)
        self.remove_spinner()

    def __enter__(self):
        if sys.stdout.isatty():
            self._screen_lock = threading.Lock()
            self.busy = True
            self.thread = threading.Thread(target=self.spinner_task)
            self.thread.start()

    def __exit__(self, exception, value, tb):
        if sys.stdout.isatty():
            self.busy = False
            self.remove_spinner(cleanup=True)
        else:
            sys.stdout.write('\r')


class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def NotifyLogDebug(Message):
    LOGGER.debug(Message)
    requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)

def NotifyLogInfo(Message):
    LOGGER.info(Message)
    requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)

def NotifyLogWarning(Message):
    LOGGER.warning(Message)
    requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)

def NotifyLogError(Message):
    LOGGER.error(Message)
    requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)

def NotifyLogCritical(Message):
    LOGGER.critical(Message)
    requests.get('https://api.telegram.org/bot' + TG_token + '/sendMessage?chat_id=' + TG_chat_id + '&text=' + Message)

def NormalExit():
    client.logout()
    LOGGER.info('Logged Out : Script Exited Normally')
    sys.exit()


if debug == 1:
    print(f"{bcolors.WARNING} DEBUG IS ON{bcolors.ENDC}")

## LOGGER LEVEL
LOGGER.setLevel(logging.INFO)

pricedata = None
timeframe = 'm1'        ## TIMEFRAME (m1, m5, m15, m30, H1, H2, H3, H4, H6, H8, D1, W1, M1)
mn_timeframe = 60       ## minutes (60, 300, 900, 1800, 3600, 14400, 86400, 604800, 2592000)
numberofcandles = 300   ## minimum of 35 for the MACD computation
symbol = 'EURUSD'
xtb_login = '1234567'
xtb_pass = 'myComplexPassword'
TG_chat_id = '123456789'
TG_token = '1234567890:aBcDeFgHiJkLmNoPqRsTuVwXyZ012345678'
amount = 0.1
objectif_percent_sell = 1.02
objectif_percent_buy = 0.98
min_objectif_amount_sell = 50
trailing_step = 150

rsi_periods = 14
bb_periods = 20
bb_standard_deviations = 2.0
upper_rsi = 72
lower_rsi = 28
version = '20210127-0110'

## INIT XTB CONNECTION
NotifyLogInfo('Starting XTB Bot Tests')
client = Client()
client.login(xtb_login, xtb_pass, mode='real')

## Check if Market is Opened or Closed
# returns an array with 'symbol : Bool'
is_opened = client.check_if_market_open([symbol])
if is_opened[symbol] == False:
    print('==MARKET IS CLOSED==')
    NormalExit()


# This function runs once at the beginning of the strategy to run initial one-time processes
def Prepare():
    global pricedata
    if debug == 1:
        print(f"{bcolors.HEADER}Requesting Initial Price Data...{bcolors.ENDC}")
    d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
    pricedata = pd.DataFrame(data=d)
    if debug == 1:
        print(f"{bcolors.OKGREEN}Initial Price Data Received...{bcolors.ENDC}")
        print('')
        ## DEBUG LIGHT
        #print(pricedata)
        ## DEBUG FULL
        #print(pricedata.to_string())
        print('')


# Get latest close bar prices and run the Update() function at every bar/candle close
def StrategyHeartBeat():
    while True:
        currenttime = dt.datetime.now()
        if timeframe == "m1" and currenttime.second == 0 and getLatestPriceData():
            Update()
        elif timeframe == "m5" and currenttime.second == 0 and currenttime.minute % 5 == 0 and getLatestPriceData():
            Update()
            with Spinner('Waiting for m5 bar...'):
                time.sleep(240)
        elif timeframe == "m15" and currenttime.second == 0 and currenttime.minute % 15 == 0 and getLatestPriceData():
            Update()
            with Spinner('Waiting for m15 bar...'):
                time.sleep(840)
        elif timeframe == "m30" and currenttime.second == 0 and currenttime.minute % 30 == 0 and getLatestPriceData():
            Update()
            with Spinner('Waiting for m30 bar...'):
                time.sleep(1740)
        elif currenttime.second == 0 and currenttime.minute == 0 and getLatestPriceData():
            Update()
            with Spinner('Waiting for H1 bar...'):
                time.sleep(3540)
        with Spinner('Waiting for m1 bar...'):
            time.sleep(1)


# Returns True when pricedata is properly updated
def getLatestPriceData():
    global pricedata
    # Normal operation will update pricedata on the first attempt
    d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
    new_pricedata = pd.DataFrame(data=d)
    if new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] != pricedata['timestamp'][len(pricedata['timestamp'])-1]:
        pricedata = new_pricedata
        return True
    counter = 0
    # If data is not available on the first attempt, try up to 6 times to update pricedata
    while new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] == pricedata['timestamp'][len(pricedata['timestamp'])-1] and counter < 6:
        print(f"{bcolors.BOLD}No updated prices found, trying again in 10 seconds...{bcolors.ENDC}")
        print("")
        counter += 1
        with Spinner('Still waiting for next bar...'):
            time.sleep(10)
        d = client.get_lastn_candle_history([symbol], mn_timeframe, numberofcandles)
        new_pricedata = pd.DataFrame(data=d)
    if new_pricedata['timestamp'][len(new_pricedata['timestamp'])-1] != pricedata['timestamp'][len(pricedata['timestamp'])-1]:
        pricedata = new_pricedata
        return True
    else:
        return False


# Returns True if stream1 crossed over stream2 in the most recent candle;
# stream2 can be an integer/float or a data array
def crossesOver(stream1, stream2):
    # If stream2 is an int or float, check if stream1 has crossed over that fixed number
    if isinstance(stream2, int) or isinstance(stream2, float):
        if stream1[len(stream1)-1] <= stream2:
            return False
        else:
            if stream1[len(stream1)-2] > stream2:
                return False
            elif stream1[len(stream1)-2] < stream2:
                return True
            else:
                x = 2
                while stream1[len(stream1)-x] == stream2:
                    x = x + 1
                if stream1[len(stream1)-x] < stream2:
                    return True
                else:
                    return False
    # Check if stream1 has crossed over stream2
    else:
        if stream1[len(stream1)-1] <= stream2[len(stream2)-1]:
            return False
        else:
            if stream1[len(stream1)-2] > stream2[len(stream2)-2]:
                return False
            elif stream1[len(stream1)-2] < stream2[len(stream2)-2]:
                return True
            else:
                x = 2
                while stream1[len(stream1)-x] == stream2[len(stream2)-x]:
                    x = x + 1
                if stream1[len(stream1)-x] < stream2[len(stream2)-x]:
                    return True
                else:
                    return False


# Returns True if stream1 crossed under stream2 in the most recent candle;
# stream2 can be an integer/float or a data array
def crossesUnder(stream1, stream2):
    # If stream2 is an int or float, check if stream1 has crossed under that fixed number
    if isinstance(stream2, int) or isinstance(stream2, float):
        if stream1[len(stream1)-1] >= stream2:
            return False
        else:
            if stream1[len(stream1)-2] < stream2:
                return False
            elif stream1[len(stream1)-2] > stream2:
                return True
            else:
                x = 2
                while stream1[len(stream1)-x] == stream2:
                    x = x + 1
                if stream1[len(stream1)-x] > stream2:
                    return True
                else:
                    return False
    # Check if stream1 has crossed under stream2
    else:
        if stream1[len(stream1)-1] >= stream2[len(stream2)-1]:
            return False
        else:
            if stream1[len(stream1)-2] < stream2[len(stream2)-2]:
                return False
            elif stream1[len(stream1)-2] > stream2[len(stream2)-2]:
                return True
            else:
                x = 2
                while stream1[len(stream1)-x] == stream2[len(stream2)-x]:
                    x = x + 1
                if stream1[len(stream1)-x] > stream2[len(stream2)-x]:
                    return True
                else:
                    return False


# Places a market order in the direction BuySell, "B" = Buy, "S" = Sell;
# uses symbol, amount, stop, limit
def enter(BuySell, stop, limit):
    volume = amount
    order = 'buy'
    if BuySell == "S":
        order = 'sell'
    try:
        msg = ' Opening tradeID for symbol ' + symbol
        NotifyLogInfo(msg)
        opentrade = client.open_trade(order, symbol, amount)
    except:
        msg = ' Error Opening Trade.'
        NotifyLogError(msg)
    else:
        msg = ' Trade Opened Successfully.'
        LOGGER.info(msg)


# Closes all positions in the direction BuySell, "B" = Close All Buy Positions,
# "S" = Close All Sell Positions; uses symbol
def exit(BuySell=None):
    openpositions = client.get_trades()
    isbuy = 0
    if BuySell == "S":
        isbuy = 1
    for position in openpositions:
        if position['symbol'] == symbol:
            if BuySell is None or position['cmd'] == isbuy:
                msg = ' Closing tradeID : ' + str(position['order'])
                NotifyLogInfo(msg)
                try:
                    closetrade = client.close_trade(position['order'])
                except:
                    msg = " Error Closing Trade."
                    NotifyLogError(msg)
                else:
                    msg = " Trade Closed Successfully."
                    LOGGER.info(msg)


# Returns the number of open positions for symbol in the direction BuySell;
# returns the total of both Buy and Sell positions if no direction is specified
def countOpenTrades(BuySell=None):
    openpositions = client.get_trades()
    counter = 0
    isbuy = 0
    if BuySell == "S":
        isbuy = 1
    for keys in openpositions:
        if keys['symbol'] == symbol:
            if BuySell is None or keys['cmd'] == isbuy:
                counter += 1
    return counter


def Update():
    print(f"{bcolors.HEADER}==================================================================================={bcolors.ENDC}")
    print(f"{bcolors.BOLD}" + str(dt.datetime.now()) + f"{bcolors.ENDC}" + " " + timeframe + " Bar Closed - Running Update Function...")
    print("Version : " + f"{bcolors.BOLD}" + version + ' ' + sys.argv[0] + f"{bcolors.ENDC}")
    print("Symbol : " + f"{bcolors.BOLD}" + symbol + f"{bcolors.ENDC}")

    # Calculate Indicators
    macd = ta.MACD(pricedata['close'])
    pricedata['cci'] = ta.CCI(pricedata['high'], pricedata['low'], pricedata['close'])
    iBBUpper = bb.upper_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
    iBBMiddle = bb.middle_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
    iBBLower = bb.lower_bollinger_band(pricedata['close'], bb_periods, bb_standard_deviations)
    iRSI = rsi(pricedata['close'], rsi_periods)

    # Declare simplified variable names for the most recent closed candle
    pricedata['macd'] = macd[0]
    pricedata['macdsignal'] = macd[1]
    pricedata['macdhist'] = macd[2]
    BBUpper = iBBUpper[len(iBBUpper)-1]
    BBMiddle = iBBMiddle[len(iBBMiddle)-1]
    BBLower = iBBLower[len(iBBLower)-1]
    close_price = pricedata['close'][len(pricedata)-1]
    last_close_price = pricedata['close'][len(pricedata)-2]
    macd_now = pricedata['macd'][len(pricedata)-1]
    macdsignal = pricedata['macdsignal'][len(pricedata)-1]
    macdhist = pricedata['macdhist'][len(pricedata)-1]
    cci = pricedata['cci'][len(pricedata)-1]
    rsi_now = iRSI[len(iRSI)-1]

    ## DEBUG FULL
    #print(pricedata.to_string())

    # Print Price/Indicators
    if close_price > last_close_price:
        print(f"Close Price : {bcolors.OKGREEN}" + str(close_price) + f"{bcolors.ENDC}")
    elif close_price < last_close_price:
        print(f"Close Price : {bcolors.FAIL}" + str(close_price) + f"{bcolors.ENDC}")
    else:
        print(f"Close Price : {bcolors.OKCYAN}" + str(close_price) + f"{bcolors.ENDC}")
    print("MACD : " + str(macd_now))
    print("Signal MACD : " + str(macdsignal))
    print("MACD History : " + str(macdhist))
    if cci <= -50:
        print(f"{bcolors.OKGREEN}CCI : " + str(cci) + f"{bcolors.ENDC}")
    elif cci >= 100:
        print(f"{bcolors.FAIL}CCI : " + str(cci) + f"{bcolors.ENDC}")
    else:
        print(f"{bcolors.OKCYAN}CCI : " + str(cci) + f"{bcolors.ENDC}")
    print("RSI : " + str(rsi_now))

    # Change Any Existing Trades' Limits to the Middle Bollinger Band
    if countOpenTrades() > 0:
        openpositions = client.get_trades()
        for position in openpositions:
            if position['symbol'] == symbol and ((position['cmd'] == 0) or (position['cmd'] == 1)):
                NotifyLogInfo("Changing Limit for tradeID: " + str(position['order']))
                try:
                    NotifyLogInfo('client.trade_transaction')
                    #client.trade_transaction(symbol, position['cmd'], trans_type, volume, stop_loss=0, take_profit=0)
                except:
                    NotifyLogError(" Error Changing Limit :(")
                else:
                    print(" Limit Changed Successfully. ;)")

    # # Entry Logic
    # if countOpenTrades('B') == 0:
    #     if ((crossesOver(pricedata['macd'], macdsignal) & (cci <= -50.0))):
    #         print(f"{bcolors.OKGREEN} BUY SIGNAL ! MACD{bcolors.ENDC}")
    #         NotifyLogInfo(" Opening " + symbol + " Buy Trade... MACD")
    #         stop = round((pricedata['close'][len(pricedata['close'])-1] * buy_stop_loss), 5)
    #         limit = round((pricedata['close'][len(pricedata['close'])-1] * buy_take_profit), 5)
    #         #enter('B', stop, limit)
    #     elif (crossesOver(iRSI, lower_rsi) and close_price < BBLower):
    #         print(f"{bcolors.OKGREEN} BUY SIGNAL ! RSI{bcolors.ENDC}")
    #         NotifyLogInfo(" Opening " + symbol + " Buy Trade... RSI")
    #         #stop = pricedata['close'][len(pricedata['close'])-1] - (BBMiddle - pricedata['close'][len(pricedata['close'])-1])
    #         stop = round((pricedata['close'][len(pricedata['close'])-1] * buy_stop_loss), 5)
    #         limit = BBMiddle
    #         #enter('B', stop, limit)
    # if (countOpenTrades('S') == 0 and close_price > BBUpper):
    #     if crossesUnder(iRSI, upper_rsi):
    #         print(f"{bcolors.FAIL} SELL SIGNAL ! RSI{bcolors.ENDC}")
    #         NotifyLogInfo(' Opening ' + symbol + ' Sell Trade... RSI')
    #         stop = pricedata['close'][len(pricedata['close'])-1] + (pricedata['close'][len(pricedata['close'])-1] - BBMiddle)
    #         limit = BBMiddle
    #         #enter('S', stop, limit)
    #     elif (crossesUnder(pricedata['macd'], macdsignal) and macd_now > 0):
    #         print(f"{bcolors.FAIL} SELL SIGNAL ! MACD{bcolors.ENDC}")
    #         NotifyLogInfo(' Opening ' + symbol + ' Sell Trade... MACD')
    #         stop = pricedata['close'][len(pricedata['close'])-1] + (pricedata['close'][len(pricedata['close'])-1] - BBMiddle)
    #         limit = BBMiddle
    #         #enter('S', stop, limit)
    # # Exit Logic
    # if countOpenTrades('B') > 0:
    #     if ((crossesUnder(pricedata['macd'], macdsignal) & (cci >= 100.0))):
    #         NotifyLogInfo(' Closing ' + symbol + ' Buy Trade(s)... Reason : MACD')
    #         #exit('B')
    #     elif (crossesUnder(iRSI, upper_rsi)):
    #         NotifyLogInfo(' Closing ' + symbol + ' Buy Trade(s)... Reason : RSI')
    #         #exit('B')
    # if countOpenTrades('S') > 0:
    #     if (iRSI[len(iRSI)-1] < lower_rsi):
    #         NotifyLogInfo(' Closing ' + symbol + ' SELL Trade because of RSI')
    #         #exit('S')
    #     elif (close_price < BBMiddle):
    #         NotifyLogInfo(' Closing ' + symbol + ' SELL Trade because of BBMiddle')
    #         #exit('S')

    print(f"{bcolors.BOLD}" + str(dt.datetime.now()) + f"{bcolors.ENDC}" + " " + timeframe + " Update Function Completed.\n")


def handle_exception():
    NotifyLogError("Exception handled on " + symbol + " ! Restarting...")
    main()


## STARTING TRADING LOOP
def main():
    try:
        Prepare()
        StrategyHeartBeat()
    except KeyboardInterrupt:
        print("")
        print(f"{bcolors.WARNING}Shutdown requested by Operator... Exiting !{bcolors.ENDC}")
        print("")
        NormalExit()
    except ServerError:
        # Catch the API's ServerError before the generic Exception handler,
        # otherwise this branch would be unreachable.
        traceback.print_exc(file=sys.stdout)
        NotifyLogError("SERVER ERROR on Bot XTB " + symbol + " ! Bot Stopped.")
        handle_exception()
    except Exception:
        traceback.print_exc(file=sys.stdout)
        LOGGER.error("EXCEPTION on Bot XTB " + symbol + " ! Bot Stopped.")
        handle_exception()


if __name__ == "__main__":
    main()
    NormalExit()
40.459459
152
0.590874
2,250
19,461
5.027556
0.188
0.019095
0.030057
0.020156
0.475248
0.450583
0.378448
0.322843
0.282443
0.262995
0
0.029211
0.277016
19,461
480
153
40.54375
0.774769
0.221314
0
0.380682
0
0
0.156666
0.028805
0
0
0
0
0
1
0.065341
false
0.005682
0.048295
0
0.213068
0.085227
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fa915f1d01ae50c5c5d775a6b404ccefbb0a1db
23,609
py
Python
datanode/src/storage_interface.py
airmap/InterUSS-Platform
fa19af360826b4dd7b841013c0c569a4f282919d
[ "Apache-2.0" ]
null
null
null
datanode/src/storage_interface.py
airmap/InterUSS-Platform
fa19af360826b4dd7b841013c0c569a4f282919d
[ "Apache-2.0" ]
1
2021-03-26T12:13:17.000Z
2021-03-26T12:13:17.000Z
datanode/src/storage_interface.py
isabella232/InterUSS-Platform
fa19af360826b4dd7b841013c0c569a4f282919d
[ "Apache-2.0" ]
2
2019-08-11T20:20:32.000Z
2021-03-26T12:01:43.000Z
"""The InterUSS Platform Data Node storage API server. This flexible and distributed system is used to connect multiple USSs operating in the same general area to share safety information while protecting the privacy of USSs, businesses, operator and consumers. The system is focused on facilitating communication amongst actively operating USSs with no details about UAS operations stored or processed on the InterUSS Platform. A data node contains all of the API, logic, and data consistency infrastructure required to perform CRUD (Create, Read, Update, Delete) operations on specific grid cells. Multiple data nodes can be executed to increase resilience and availability. This is achieved by a stateless API to service USSs, an information interface to translate grid cell USS information into the correct data storage format, and an information consistency store to ensure data is up to date. This module is the information interface to Zookeeper. Copyright 2018 Google LLC Licensed under the Apache License, Version 2.0 (the 'License'); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import json import logging # Our data structure for the actual metadata stored import uss_metadata # Utilties for validating slippy import slippy_util # Kazoo is the zookeeper wrapper for python from kazoo.client import KazooClient from kazoo.exceptions import KazooException from kazoo.exceptions import BadVersionError from kazoo.exceptions import NoNodeError from kazoo.exceptions import RolledBackError from kazoo.handlers.threading import KazooTimeoutError from kazoo.protocol.states import KazooState # logging is our log infrastructure used for this application logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO) log = logging.getLogger('InterUSS_DataNode_InformationInterface') # CONSTANTS # Lock stores in this format /uss/gridcells/{z}/{x}/{y}/manifest USS_BASE_PREFIX = '/uss/gridcells/' TEST_BASE_PREFIX = '/test/' USS_METADATA_FILE = '/manifest' BAD_CHARACTER_CHECK = '\';(){}[]!@#$%^&*|"<>' CONNECTION_TIMEOUT = 2.5 # seconds DEFAULT_CONNECTION = 'localhost:2181' GRID_PATH = USS_BASE_PREFIX MAX_SAFE_INTEGER = 9007199254740991 class USSMetadataManager(object): """Interfaces with the locking system to get, put, and delete USS metadata. Metadata gets/stores/deletes the USS information for a partiular grid, including current version number, a list of USSs with active operations, and the endpoints to get that information. Locking is assured through a snapshot token received when getting, and used when putting. """ def __init__(self, connectionstring=DEFAULT_CONNECTION, testgroupid=None): """Initializes the class. Args: connectionstring: Zookeeper connection string - server:port,server:port,... 
testgroupid: ID to use if in test mode, none for normal mode """ if testgroupid: self.set_testmode(testgroupid) if not connectionstring: connectionstring = DEFAULT_CONNECTION log.debug('Creating metadata manager object and connecting to zookeeper...') try: if set(BAD_CHARACTER_CHECK) & set(connectionstring): raise ValueError self.zk = KazooClient(hosts=connectionstring, timeout=CONNECTION_TIMEOUT) self.zk.add_listener(self.zookeeper_connection_listener) self.zk.start() if testgroupid: self.delete_testdata(testgroupid) except KazooTimeoutError: log.error('Unable to connect to zookeeper using %s connection string...', connectionstring) raise except ValueError: log.error('Connection string %s seems invalid...', connectionstring) raise def __del__(self): log.debug('Destroying metadata manager object and disconnecting from zk...') self.zk.stop() def get_state(self): return self.zk.state def get_version(self): try: return True, self.zk.server_version() except KazooException as e: msg = str(e) return False, type(e).__name__ + (' ' + msg if msg else '') def set_verbose(self): log.setLevel(logging.DEBUG) def set_testmode(self, testgroupid='UNDEFINED_TESTER'): """Sets the mode to testing with the specific test ID, cannot be undone. Args: testgroupid: ID to use if in test mode, none for normal mode """ global GRID_PATH global CONNECTION_TIMEOUT # Adjust parameters specifically for the test GRID_PATH = TEST_BASE_PREFIX + testgroupid + USS_BASE_PREFIX log.debug('Setting test path to %s...', GRID_PATH) CONNECTION_TIMEOUT = 1.0 def zookeeper_connection_listener(self, state): if state == KazooState.LOST: # Register somewhere that the session was lost log.error('Lost connection with the zookeeper servers...') elif state == KazooState.SUSPENDED: # Handle being disconnected from Zookeeper log.error('Suspended connection with the zookeeper servers...') elif state == KazooState.CONNECTED: # Handle being connected/reconnected to Zookeeper log.info('Connection restored with the zookeeper servers...') def delete_testdata(self, testgroupid=None): """Removes the test data from the servers. Be careful when using this in parallel as it removes everything under the testgroupid, or everything if no tetgroupid is provided. Args: testgroupid: ID to use if in test mode, none will remove all test data """ if testgroupid: path = TEST_BASE_PREFIX + testgroupid else: path = TEST_BASE_PREFIX self.zk.delete(path, recursive=True) def get(self, z, x, y): """Gets the metadata and snapshot token for a GridCell. Reads data from zookeeper, including a snapshot token. The snapshot token is used as a reference when writing to ensure the data has not been updated between read and write. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ # TODO(hikevin): Change to use our own error codes and let the server # convert them to http error codes. For now, this is # at least in a standard JSend format. 
status = 500 if slippy_util.validate_slippy(z, x, y): (content, metadata) = self._get_raw(z, x, y) if metadata: try: m = uss_metadata.USSMetadata(content) status = 200 result = { 'status': 'success', 'sync_token': metadata.last_modified_transaction_id, 'data': m.to_json() } except ValueError: status = 424 else: status = 404 else: status = 400 if status != 200: result = self._format_status_code_to_jsend(status) return result def set(self, z, x, y, sync_token, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation): """Sets the metadata for a GridCell. Writes data, using the snapshot token for confirming data has not been updated since it was last read. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format sync_token: token retrieved in the original GET GridCellMetadata, uss_id: plain text identifier for the USS, ws_scope: scope to use to obtain OAuth token, operation_format: output format for operation ws (i.e. NASA, GUTMA), operation_ws: submitting USS endpoint where all flights in this cell can be retrieved from, earliest_operation: lower bound of active or planned flight timestamp, used for quick filtering conflicts. latest_operation: upper bound of active or planned flight timestamp, used for quick filtering conflicts. Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ if slippy_util.validate_slippy(z, x, y): # first we have to get the cell (content, metadata) = self._get_raw(z, x, y) if metadata: # Quick check of the token, another is done on the actual set to be sure # but this check fails early and fast if str(metadata.last_modified_transaction_id) == str(sync_token): try: m = uss_metadata.USSMetadata(content) log.debug('Setting metadata for %s...', uss_id) if not m.upsert_operator(uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation, z, x, y): log.error('Failed setting operator for %s with token %s...', uss_id, str(sync_token)) raise ValueError status = self._set_raw(z, x, y, m, metadata.version) except ValueError: status = 424 else: status = 409 else: status = 404 else: status = 400 if status == 200: # Success, now get the metadata back to send back result = self.get(z, x, y) else: result = self._format_status_code_to_jsend(status) return result def delete(self, z, x, y, uss_id): """Sets the metadata for a GridCell by removing the entry for the USS. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format uss_id: is the plain text identifier for the USS Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ status = 500 if slippy_util.validate_slippy(z, x, y): # first we have to get the cell (content, metadata) = self._get_raw(z, x, y) if metadata: try: m = uss_metadata.USSMetadata(content) m.remove_operator(uss_id) # TODO(pelletierb): Automatically retry on delete status = self._set_raw(z, x, y, m, metadata.version) except ValueError: status = 424 else: status = 404 else: status = 400 if status == 200: # Success, now get the metadata back to send back (content, metadata) = self._get_raw(z, x, y) result = { 'status': 'success', 'sync_token': metadata.last_modified_transaction_id, 'data': m.to_json() } else: result = self._format_status_code_to_jsend(status) return result def get_multi(self, z, grids): """Gets the metadata and snapshot token for multiple GridCells. Reads data from zookeeper, including a composite snapshot token. 
The snapshot token is used as a reference when writing to ensure the data has not been updated between read and write. Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to retrieve Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ try: combined_meta, syncs = self._get_multi_raw(z, grids) log.debug('Found sync token %s for %d grids...', self._hash_sync_tokens(syncs), len(syncs)) result = { 'status': 'success', 'sync_token': self._hash_sync_tokens(syncs), 'data': combined_meta.to_json() } except ValueError as e: result = self._format_status_code_to_jsend(400, e.message) except IndexError as e: result = self._format_status_code_to_jsend(404, e.message) return result def set_multi(self, z, grids, sync_token, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation): """Sets multiple GridCells metadata at once. Writes data, using the hashed snapshot token for confirming data has not been updated since it was last read. Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to update sync_token: token retrieved in the original get_multi, uss_id: plain text identifier for the USS, ws_scope: scope to use to obtain OAuth token, operation_format: output format for operation ws (i.e. NASA, GUTMA), operation_ws: submitting USS endpoint where all flights in this cell can be retrieved from, earliest_operation: lower bound of active or planned flight timestamp, used for quick filtering conflicts. latest_operation: upper bound of active or planned flight timestamp, used for quick filtering conflicts. Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ log.debug('Setting multiple grid metadata for %s...', uss_id) try: # first, get the affected grid's sync tokens m, syncs = self._get_multi_raw(z, grids) del m # Quick check of the token, another is done on the actual set to be sure # but this check fails early and fast log.debug('Found sync token %d for %d grids...', self._hash_sync_tokens(syncs), len(syncs)) if str(self._hash_sync_tokens(syncs)) == str(sync_token): log.debug('Composite sync_token matches, continuing...') self._set_multi_raw(z, grids, syncs, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation) log.debug('Completed updating multiple grids...') else: raise KeyError('Composite sync_token has changed') combined_meta, new_syncs = self._get_multi_raw(z, grids) result = { 'status': 'success', 'sync_token': self._hash_sync_tokens(new_syncs), 'data': combined_meta.to_json() } except (KeyError, RolledBackError) as e: result = self._format_status_code_to_jsend(409, e.message) except ValueError as e: result = self._format_status_code_to_jsend(400, e.message) except IndexError as e: result = self._format_status_code_to_jsend(404, e.message) return result def delete_multi(self, z, grids, uss_id): """Sets multiple GridCells metadata by removing the entry for the USS. Removes the operator from multiple cells. Does not return 404 on not finding the USS in a cell, since this should be a remove all type function, as some cells might have the ussid and some might not. 
Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to delete uss_id: is the plain text identifier for the USS Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ log.debug('Deleting multiple grid metadata for %s...', uss_id) try: if not uss_id: raise ValueError('Invalid uss_id for deleting multi') for x, y in grids: if slippy_util.validate_slippy(z, x, y): (content, metadata) = self._get_raw(z, x, y) if metadata: m = uss_metadata.USSMetadata(content) m.remove_operator(uss_id) # TODO(pelletierb): Automatically retry on delete status = self._set_raw(z, x, y, m, metadata.version) else: raise ValueError('Invalid slippy grids for lookup') result = self.get_multi(z, grids) except ValueError as e: result = self._format_status_code_to_jsend(400, e.message) return result ###################################################################### ################ INTERNAL FUNCTIONS ######################### ###################################################################### def _get_raw(self, z, x, y): """Gets the raw content and metadata for a GridCell from zookeeper. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format Returns: content: USS metadata metadata: straight from zookeeper """ path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y), USS_METADATA_FILE) log.debug('Getting metadata from zookeeper@%s...', path) try: c, m = self.zk.get(path) except NoNodeError: self.zk.ensure_path(path) c, m = self.zk.get(path) if c: log.debug('Received raw content and metadata from zookeeper: %s', c) if m: log.debug('Received raw metadata from zookeeper: %s', m) return c, m def _set_raw(self, z, x, y, m, version): """Grabs the lock and updates the raw content for a GridCell in zookeeper. Args: z: zoom level in slippy tile format x: x tile number in slippy tile format y: y tile number in slippy tile format m: metadata object to write version: the metadata version verified from the sync_token match Returns: 200 for success, 409 for conflict, 408 for unable to get the lock """ path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y), USS_METADATA_FILE) try: log.debug('Setting metadata to %s...', str(m)) self.zk.set(path, json.dumps(m.to_json()), version) status = 200 except BadVersionError: log.error('Sync token updated before write for %s...', path) status = 409 return status def _get_multi_raw(self, z, grids): """Gets the raw content and metadata for multiple GridCells from zookeeper. 
Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to retrieve Returns: content: Combined USS metadata syncs: list of sync tokens in the same order as the grids Raises: IndexError: if it cannot find anything in zookeeper ValueError: if the grid data is not in the right format """ log.debug('Getting multiple grid metadata for %s...', str(grids)) combined_meta = None syncs = [] for x, y in grids: if slippy_util.validate_slippy(z, x, y): (content, metadata) = self._get_raw(z, x, y) if metadata: combined_meta += uss_metadata.USSMetadata(content) syncs.append(metadata.last_modified_transaction_id) else: raise IndexError('Unable to find metadata in platform') else: raise ValueError('Invalid slippy grids for lookup') if len(syncs) == 0: raise IndexError('Unable to find metadata in platform') return combined_meta, syncs def _set_multi_raw(self, z, grids, sync_tokens, uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation): """Grabs the lock and updates the raw content for multiple GridCells Args: z: zoom level in slippy tile format grids: list of (x,y) tiles to retrieve sync_tokens: list of the sync tokens received during get operation uss_id: plain text identifier for the USS, ws_scope: scope to use to obtain OAuth token, operation_format: output format for operation ws (i.e. NASA, GUTMA), operation_ws: submitting USS endpoint where all flights in this cell can be retrieved from, earliest_operation: lower bound of active or planned flight timestamp, used for quick filtering conflicts. latest_operation: upper bound of active or planned flight timestamp, used for quick filtering conflicts. Raises: IndexError: if it cannot find anything in zookeeper ValueError: if the grid data is not in the right format """ log.debug('Setting multiple grid metadata for %s...', str(grids)) try: contents = [] for i in range(len(grids)): # First, get and update them all in memory, validate the sync_token x = grids[i][0] y = grids[i][1] sync_token = sync_tokens[i] path = '%s/%s/%s/%s/%s' % (GRID_PATH, str(z), str(x), str(y), USS_METADATA_FILE) (content, metadata) = self._get_raw(z, x, y) if str(metadata.last_modified_transaction_id) == str(sync_token): log.debug('Sync_token matches for %d, %d...', x, y) m = uss_metadata.USSMetadata(content) if not m.upsert_operator(uss_id, ws_scope, operation_format, operation_ws, earliest_operation, latest_operation, z, x, y): raise ValueError('Failed to set operator content') contents.append((path, m, metadata.version)) else: log.error( 'Sync token from USS (%s) does not match token from zk (%s)...', str(sync_token), str(metadata.last_modified_transaction_id)) raise KeyError('Composite sync_token has changed') # Now, start a transaction to update them all # the version will catch any changes and roll back any attempted # updates to the grids log.debug('Starting transaction to write all grids at once...') t = self.zk.transaction() for path, m, version in contents: t.set_data(path, json.dumps(m.to_json()), version) log.debug('Committing transaction...') results = t.commit() if isinstance(results[0], RolledBackError): raise KeyError('Rolled back multi-grid transaction due to grid change') log.debug('Committed transaction successfully.') except (KeyError, ValueError, IndexError) as e: log.error('Error caught in set_multi_raw %s.', e.message) raise e def _format_status_code_to_jsend(self, status, message=None): """Formats a response based on HTTP status code. 
Args: status: HTTP status code message: optional message to override preset message for codes Returns: JSend formatted response (https://labs.omniti.com/labs/jsend) """ if status == 200 or status == 204: result = {'status': 'success', 'code': 204, 'message': 'Empty data set.'} elif status == 400: result = { 'status': 'fail', 'code': status, 'message': 'Parameters are not following the correct format.' } elif status == 404: result = { 'status': 'fail', 'code': status, 'message': 'Unable to pull metadata from lock system.' } elif status == 408: result = { 'status': 'fail', 'code': status, 'message': 'Timeout trying to get lock.' } elif status == 409: result = { 'status': 'fail', 'code': status, 'message': 'Content in metadata has been updated since provided sync token.' } elif status == 424: result = { 'status': 'fail', 'code': status, 'message': 'Content in metadata is not following JSON format guidelines.' } else: result = { 'status': 'fail', 'code': status, 'message': 'Unknown error code occurred.' } if message: result['message'] = message return result @staticmethod def _hash_sync_tokens(syncs): """Hashes a list of sync tokens into a single, positive 64-bit int. For various languages, the limit to integers may be different, therefore we truncate to ensure the hash is the same on all implementations. """ return abs(hash(tuple(sorted(syncs)))) % MAX_SAFE_INTEGER
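A minimal usage sketch of the manager above, assuming a Zookeeper server reachable at localhost:2181; the slippy coordinates, USS identifiers, and endpoints are illustrative placeholders, not values from this codebase:

# Usage sketch (hypothetical values): read a cell, then write with its token.
mm = USSMetadataManager('localhost:2181')
cell = mm.get(10, 512, 512)                   # JSend dict with a sync_token
if cell['status'] == 'success':
    result = mm.set(10, 512, 512,
                    cell['sync_token'],        # proves the read is still current
                    'uss-example', 'example.scope',
                    'NASA', 'https://uss.example/flights',
                    '2018-01-01T01:00:00-05:00',
                    '2018-01-01T02:00:00-05:00')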
38.264182
80
0.656275
3,170
23,609
4.782019
0.162461
0.004222
0.00475
0.023748
0.509466
0.488687
0.465004
0.432153
0.399565
0.377663
0
0.008301
0.25503
23,609
616
81
38.326299
0.853593
0.400017
0
0.491228
0
0
0.168211
0.004746
0
0
0
0.00487
0
1
0.05848
false
0
0.032164
0.002924
0.134503
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fb15d8fc5f2340ec039cd29cb846d5d8253d9c0
9,501
py
Python
scormxblock/scormxblock.py
Pearson-Advance/edx_xblock_scorm
eff4f18963424ac090662e03040dc8f003770cd3
[ "Apache-2.0" ]
null
null
null
scormxblock/scormxblock.py
Pearson-Advance/edx_xblock_scorm
eff4f18963424ac090662e03040dc8f003770cd3
[ "Apache-2.0" ]
1
2020-10-27T20:04:30.000Z
2020-10-27T20:04:30.000Z
scormxblock/scormxblock.py
Pearson-Advance/edx_xblock_scorm
eff4f18963424ac090662e03040dc8f003770cd3
[ "Apache-2.0" ]
null
null
null
import json
import re
import os
import pkg_resources
import zipfile
import shutil
import xml.etree.ElementTree as ET

from django.conf import settings
from django.template import Context, Template
from webob import Response
from xblock.core import XBlock
from xblock.fields import Scope, String, Float, Boolean, Dict
from xblock.fragment import Fragment

# Make '_' a no-op so we can scrape strings
_ = lambda text: text


class ScormXBlock(XBlock):

    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        default="Scorm",
        scope=Scope.settings,
    )
    scorm_file = String(
        display_name=_("Upload scorm file"),
        scope=Scope.settings,
    )
    version_scorm = String(
        default="SCORM_12",
        scope=Scope.settings,
    )
    # save completion_status for SCORM_2004
    lesson_status = String(
        scope=Scope.user_state,
        default='not attempted'
    )
    success_status = String(
        scope=Scope.user_state,
        default='unknown'
    )
    lesson_location = String(
        scope=Scope.user_state,
        default=''
    )
    suspend_data = String(
        scope=Scope.user_state,
        default=''
    )
    data_scorm = Dict(
        scope=Scope.user_state,
        default={}
    )
    lesson_score = Float(
        scope=Scope.user_state,
        default=0
    )
    weight = Float(
        default=1,
        scope=Scope.settings
    )
    has_score = Boolean(
        display_name=_("Scored"),
        help=_("Select True if this component will receive a numerical score from the Scorm"),
        default=False,
        scope=Scope.settings
    )
    icon_class = String(
        default="video",
        scope=Scope.settings,
    )

    has_author_view = True

    def resource_string(self, path):
        """Handy helper for getting resources from our kit."""
        data = pkg_resources.resource_string(__name__, path)
        return data.decode("utf8")

    def student_view(self, context=None):
        context_html = self.get_context_student()
        template = self.render_template('static/html/scormxblock.html', context_html)
        frag = Fragment(template)
        frag.add_css(self.resource_string("static/css/scormxblock.css"))
        frag.add_javascript(self.resource_string("static/js/src/scormxblock.js"))
        settings = {
            'version_scorm': self.version_scorm
        }
        frag.initialize_js('ScormXBlock', json_args=settings)
        return frag

    def studio_view(self, context=None):
        context_html = self.get_context_studio()
        template = self.render_template('static/html/studio.html', context_html)
        frag = Fragment(template)
        frag.add_css(self.resource_string("static/css/scormxblock.css"))
        frag.add_javascript(self.resource_string("static/js/src/studio.js"))
        frag.initialize_js('ScormStudioXBlock')
        return frag

    def author_view(self, context):
        html = self.resource_string("static/html/author_view.html")
        frag = Fragment(html)
        return frag

    @XBlock.handler
    def studio_submit(self, request, suffix=''):
        self.display_name = request.params['display_name']
        self.has_score = request.params['has_score']
        self.icon_class = 'problem' if self.has_score == 'True' else 'video'
        if hasattr(request.params['file'], 'file'):
            file = request.params['file'].file
            zip_file = zipfile.ZipFile(file, 'r')
            path_to_file = os.path.join(
                settings.PROFILE_IMAGE_BACKEND['options']['location'],
                self.location.block_id)
            if os.path.exists(path_to_file):
                shutil.rmtree(path_to_file)
            zip_file.extractall(path_to_file)
            self.set_fields_xblock(path_to_file)
        return Response(json.dumps({'result': 'success'}),
                        content_type='application/json')

    @XBlock.json_handler
    def scorm_get_value(self, data, suffix=''):
        name = data.get('name')
        if name in ['cmi.core.lesson_status', 'cmi.completion_status']:
            return {'value': self.lesson_status}
        elif name == 'cmi.success_status':
            return {'value': self.success_status}
        elif name == 'cmi.core.lesson_location':
            return {'value': self.lesson_location}
        elif name == 'cmi.suspend_data':
            return {'value': self.suspend_data}
        else:
            return {'value': self.data_scorm.get(name, '')}

    @XBlock.json_handler
    def scorm_set_value(self, data, suffix=''):
        context = {'result': 'success'}
        name = data.get('name')

        if name in ['cmi.core.lesson_status', 'cmi.completion_status']:
            self.lesson_status = data.get('value')
            if self.has_score and data.get('value') in ['completed', 'failed', 'passed']:
                self.publish_grade()
                context.update({"lesson_score": self.lesson_score})
        elif name == 'cmi.success_status':
            self.success_status = data.get('value')
            if self.has_score:
                if self.success_status == 'unknown':
                    self.lesson_score = 0
                self.publish_grade()
                context.update({"lesson_score": self.lesson_score})
        elif name in ['cmi.core.score.raw', 'cmi.score.raw'] and self.has_score:
            self.lesson_score = int(data.get('value', 0)) / 100.0
            context.update({"lesson_score": self.lesson_score})
        elif name == 'cmi.core.lesson_location':
            self.lesson_location = data.get('value', '')
        elif name == 'cmi.suspend_data':
            self.suspend_data = data.get('value', '')
        else:
            self.data_scorm[name] = data.get('value', '')

        context.update({"completion_status": self.get_completion_status()})
        return context

    def publish_grade(self):
        if self.lesson_status == 'failed' or (
                self.version_scorm == 'SCORM_2004'
                and self.success_status in ['failed', 'unknown']):
            self.runtime.publish(
                self,
                'grade',
                {
                    'value': 0,
                    'max_value': self.weight,
                })
        else:
            self.runtime.publish(
                self,
                'grade',
                {
                    'value': self.lesson_score,
                    'max_value': self.weight,
                })

    def max_score(self):
        """
        Return the maximum score possible.
        """
        return self.weight if self.has_score else None

    def get_context_studio(self):
        return {
            'field_display_name': self.fields['display_name'],
            'display_name_value': self.display_name,
            'field_scorm_file': self.fields['scorm_file'],
            'field_has_score': self.fields['has_score'],
            'has_score_value': self.has_score
        }

    def get_context_student(self):
        scorm_file_path = ''
        if self.scorm_file:
            scheme = 'https' if settings.HTTPS == 'on' else 'http'
            scorm_file_path = '{}://{}{}'.format(
                scheme, settings.ENV_TOKENS.get('LMS_BASE'), self.scorm_file)
        return {
            'scorm_file_path': scorm_file_path,
            'lesson_score': self.lesson_score,
            'weight': self.weight,
            'has_score': self.has_score,
            'completion_status': self.get_completion_status()
        }

    def render_template(self, template_path, context):
        template_str = self.resource_string(template_path)
        template = Template(template_str)
        return template.render(Context(context))

    def set_fields_xblock(self, path_to_file):
        path_index_page = 'index.html'
        try:
            tree = ET.parse('{}/imsmanifest.xml'.format(path_to_file))
        except IOError:
            pass
        else:
            namespace = ''
            for node in [node for _, node in ET.iterparse(
                    '{}/imsmanifest.xml'.format(path_to_file), events=['start-ns'])]:
                if node[0] == '':
                    namespace = node[1]
                    break
            root = tree.getroot()
            if namespace:
                resource = root.find('{{{0}}}resources/{{{0}}}resource'.format(namespace))
                schemaversion = root.find('{{{0}}}metadata/{{{0}}}schemaversion'.format(namespace))
            else:
                resource = root.find('resources/resource')
                schemaversion = root.find('metadata/schemaversion')
            if resource:
                path_index_page = resource.get('href')
            if schemaversion is not None and re.match('^1.2$', schemaversion.text) is None:
                self.version_scorm = 'SCORM_2004'

        self.scorm_file = os.path.join(
            settings.PROFILE_IMAGE_BACKEND['options']['base_url'],
            '{}/{}'.format(self.location.block_id, path_index_page))

    def get_completion_status(self):
        completion_status = self.lesson_status
        if self.version_scorm == 'SCORM_2004' and self.success_status != 'unknown':
            completion_status = self.success_status
        return completion_status

    @staticmethod
    def workbench_scenarios():
        """A canned scenario for display in the workbench."""
        return [
            ("ScormXBlock",
             """<vertical_demo>
                <scormxblock/>
                </vertical_demo>
             """),
        ]
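For reference, a sketch of the dict contract the two JSON handlers above expect. The values are hypothetical; in production the SCORM player in the browser posts these through the XBlock runtime's handler URLs rather than calling the methods directly:

# Hypothetical handler payloads (shape only, not a live call).
set_payload = {"name": "cmi.core.score.raw", "value": "85"}
# scorm_set_value(set_payload) would store lesson_score = 85 / 100.0 = 0.85
# when has_score is True, and publish_grade() scales it by `weight`.
get_payload = {"name": "cmi.core.lesson_status"}
# scorm_get_value(get_payload) would return {'value': self.lesson_status}.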
35.059041
131
0.592674
1,064
9,501
5.080827
0.193609
0.020718
0.017758
0.021088
0.303367
0.252312
0.181095
0.167037
0.155198
0.104514
0
0.00533
0.289127
9,501
270
132
35.188889
0.795084
0.022313
0
0.242291
0
0
0.151611
0.044347
0
0
0
0
0
1
0.066079
false
0.008811
0.057269
0.004405
0.259912
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fb35315892b484eea92d588c1ea5a815edbedc1
4,861
py
Python
src/core/modules/stt.py
pyVoice/pyVoice
62e42a5c6307df2dd2d74bcd20ca64fd81c58851
[ "MIT" ]
1
2020-12-12T12:06:12.000Z
2020-12-12T12:06:12.000Z
src/core/modules/stt.py
pyVoice/pyVoice
62e42a5c6307df2dd2d74bcd20ca64fd81c58851
[ "MIT" ]
24
2021-02-08T19:44:44.000Z
2021-04-10T11:54:53.000Z
src/core/modules/stt.py
pyVoice/pyVoice
62e42a5c6307df2dd2d74bcd20ca64fd81c58851
[ "MIT" ]
null
null
null
""" **Speech to Text (STT) engine** Converts the user speech (audio) into text. """ import threading import traceback import speech_recognition as sr from src import settings from src.core.modules import log, tts, replying def setup() -> None: """ Initializes the STT engine Steps: 1. Creates a new `Recognizer` object 2. Configures the energy threshold """ global recognizer recognizer = sr.Recognizer() recognizer.dynamic_energy_threshold = False recognizer.energy_threshold = settings.SR_ENERGY_THRESHOLD def listen() -> sr.AudioData: """ Listens for user input (voice) and returns it Returns: sr.AudioData: The raw input data """ with sr.Microphone() as raw_microphone_input: log.debug("Listening to ambient...") audio = recognizer.listen(raw_microphone_input) return audio def recognize(audio: sr.AudioData) -> str: """ Transcribes human voice data from a `AudioData` object (from `listen`) Args: audio (sr.AudioData): The raw audio data from the user Returns: str: A sentence/phrase with the user intent """ output = None log.debug("Recognizing audio...") if settings.STT_ENGINE == "google": try: output = recognizer.recognize_google(audio, language=settings.LANGUAGE) except sr.UnknownValueError: log.debug("Speech engine could not resolve audio") except sr.RequestError: log.error("An error ocurred with the Google services, try again") except: traceback.print_exc() log.error("A unknown error ocurred...") finally: return output def recognize_keyword() -> None: """ Listens for the keyword, to activate the assistant. Steps: 1. Listens for audio from the microphone 2. Recognizes the audio using `gTTS` 3. Checks if the keyword (as in `settings.KEYWORD`) is in the audio data (if True, break loop) """ global keyword_detected global new_process audio = listen() new_process = True log.debug("Recognizing keyword...") try: rec_input = recognizer.recognize_google(audio, language=settings.LANGUAGE) if settings.KEYWORD in rec_input.lower(): log.debug("Keyword detected!") # stop listening keyword_detected = True else: log.debug("Keyword not detected in '{0}'".format(rec_input)) except sr.UnknownValueError: log.debug("Speech engine could not resolve audio") except sr.RequestError: log.error("An error ocurred with the Google services, try again") except: traceback.print_exc() log.error("A unknown error ocurred...") def listen_for_keyword() -> bool: """ Loops until the keyword is recognized from the user input (from `recognize_keyword`). Steps: 1. Enters the loop (keyword detection) 2. Creates a new thread (using `recognize_keyword` as target) 3. If the keywork is detected, break the loop and play the activation sound Returns: bool: Whether the keyword is recognizes or not. If not, continue the loop. """ global keyword_detected global new_process log.debug("Keyword loop...") keyword_detected = False new_process = True log.info("Waiting for '{0}'...".format(settings.KEYWORD)) while True: if keyword_detected: break if new_process: new_process = False threading.Thread(target=recognize_keyword).start() tts.play_mp3(settings.ACTIVATION_SOUND_PATH) return True def listen_for_binary() -> bool: """ Checks if a binary/boolean value (Yes/No) is present in the transcribed audio. Used in Yes/No questions (e.g. *"Do you want X?"*) Steps: 1. Listens for audio from the microphone 2. Recognizes the audio using `gTTS` 3. 
Checks if a boolean value (Yes, No, True, False) is present in the audio data Returns: bool: Wheter a boolean value is present in the audio data """ yes_reply = replying.get_reply(["stt", "yn_y"], system=True, module=True) no_reply = replying.get_reply(["stt", "yn_n"], system=True, module=True) log.info("Waiting for {0} or {1}".format(yes_reply, no_reply)) while True: audio = listen() rec_input = recognize(audio) if rec_input: if yes_reply in rec_input.lower(): log.debug("'{0}' detected".format(yes_reply)) return True elif no_reply in rec_input.lower(): log.debug("'{0}' detected".format(no_reply)) return False else: log.debug("Not detected binary answer in {0}".format(rec_input))
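A minimal wiring sketch of the functions above (assumes a working microphone and a `settings` module configured with KEYWORD, LANGUAGE, STT_ENGINE, and the activation sound path):

# Hotword-then-command loop, using only the public functions of this module.
setup()
if listen_for_keyword():      # blocks until the hotword is heard
    audio = listen()          # capture the follow-up utterance
    text = recognize(audio)   # may be None if recognition failed
    if text:
        print("You said:", text)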
27.619318
102
0.632174
610
4,861
4.944262
0.254098
0.029178
0.009947
0.013926
0.331233
0.31996
0.265252
0.202255
0.202255
0.202255
0
0.005376
0.272989
4,861
175
103
27.777143
0.848048
0.307138
0
0.37037
0
0
0.151295
0
0
0
0
0
0
1
0.074074
false
0
0.061728
0
0.197531
0.024691
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fb73cc1aa55107790f427e4e1e4f03476a6ace6
1,493
py
Python
packages/w3af/w3af/core/controllers/profiling/scan_log_analysis/data/errors.py
ZooAtmosphereGroup/HelloPackages
0ccffd33bf927b13d28c8f715ed35004c33465d9
[ "Apache-2.0" ]
null
null
null
packages/w3af/w3af/core/controllers/profiling/scan_log_analysis/data/errors.py
ZooAtmosphereGroup/HelloPackages
0ccffd33bf927b13d28c8f715ed35004c33465d9
[ "Apache-2.0" ]
null
null
null
packages/w3af/w3af/core/controllers/profiling/scan_log_analysis/data/errors.py
ZooAtmosphereGroup/HelloPackages
0ccffd33bf927b13d28c8f715ed35004c33465d9
[ "Apache-2.0" ]
null
null
null
import re

from utils.output import KeyValueOutput

ERRORS_RE = [re.compile('Unhandled exception "(.*?)"'),
             re.compile('traceback', re.IGNORECASE),
             re.compile('w3af-crash'),
             re.compile('scan was able to continue by ignoring those'),
             re.compile('The scan will stop')]

IGNORES = [u'The fuzzable request router loop will break']

# Original log line without any issues:
#
#   AuditorWorker worker pool internal thread state: (worker: True, task: True, result: True)
#
# When there is ONE missing True we have issues; when the pool finishes, all three are False
POOL_INTERNAL = 'pool internal thread state'


def matches_ignore(line):
    for ignore in IGNORES:
        if ignore in line:
            return True
    return False


def get_errors(scan_log_filename, scan):
    scan.seek(0)
    errors = []

    for line in scan:
        for error_re in ERRORS_RE:
            match = error_re.search(line)
            if match and not matches_ignore(line):
                line = line.strip()
                errors.append(line)

    scan.seek(0)

    for line in scan:
        if POOL_INTERNAL not in line:
            continue
        if line.count('True') in (0, 3):
            continue
        line = line.strip()
        errors.append(line)

    output = KeyValueOutput('errors',
                            'errors and exceptions',
                            {'count': len(errors),
                             'errors': errors})
    return output
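A short usage sketch of get_errors() (the log file name is hypothetical; `scan` is an open file handle that the function rewinds itself, and `scan_log_filename` is informational only):

# Sketch: collect errors/exceptions from a w3af scan log.
with open('w3af-scan.log') as scan:
    output = get_errors('w3af-scan.log', scan)
    # output is a KeyValueOutput wrapping {'count': N, 'errors': [...]}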
25.305085
95
0.592096
184
1,493
4.744565
0.440217
0.051546
0.041237
0.052692
0.066438
0.066438
0
0
0
0
0
0.004888
0.314802
1,493
58
96
25.741379
0.848485
0.148694
0
0.294118
0
0
0.172332
0
0
0
0
0
0
1
0.058824
false
0
0.058824
0
0.205882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fb8c0338db15cdfd4d8333778bf52ca725b2f55
5,925
py
Python
__main__.py
Naruto0/fplyst
af5c30a5bbd91ace21c3c5305c8e202ba016ba09
[ "MIT" ]
null
null
null
__main__.py
Naruto0/fplyst
af5c30a5bbd91ace21c3c5305c8e202ba016ba09
[ "MIT" ]
3
2021-03-22T17:12:14.000Z
2021-12-13T19:39:39.000Z
__main__.py
Naruto0/fplyst
af5c30a5bbd91ace21c3c5305c8e202ba016ba09
[ "MIT" ]
null
null
null
#! /usr/bin/python3
#
# Usage:
#
# path/to/script$ python3 __main__.py -c <config_file>
#
# Will create a 'YYYY_MM_DD_STREAMNAME_PLAYLIST.txt' file
# which will contain the currently captured song:
#
# HH:MM Interpret - Song Name
#
# To capture the whole playlist you have to
# set up a crontab schedule or its Windows/Mac equivalent.
#
# The crontab job should run every minute,
# which is enough to make sure the timing is
# correct.
# You may want to make sure that the files are
# saved in the right directory; the config file is optional:
#
# */1 * * * * cd <path to script> && python3 __main__.py [-c myConfig.json]
#
# If you want to make your own config file,
# edit the variables which make up the _dictionary
# underneath the imports
# (e.g. _station, _url, _interpret_path, _song_name_path).
#
# Then run:
#
# you@host~/.../fplyst$ python3 -i __main__.py
#
# In the Python prompt you either call the method
# without any arguments, which overwrites the
# original config file...
#
# >>> make_config()
#
# ...or you feed it a filename,
# which you may then use to import
# the config for various stations.
#
# >>> make_config("myConfig.json")
#
# (the json extension is optional)
#
# If you are familiar enough with xpath syntax,
# it shouldn't be hard for you to easily
# set up the HTML xpaths for the interpret and the song.
#
# TODO: include selenium to support javascript generated <html>

import sys
import json
import getopt
import time as _t

from requests import get
from requests.exceptions import ConnectionError, SSLError

with open("requirements.txt", "r") as _req_file:
    _req = _req_file.readlines()

try:
    from lxml import html
    from selenium import webdriver
    from pyvirtualdisplay import Display
except ImportError:
    if _req:
        print("You have to install modules: ")
        for module in _req:
            print("\t%s" % module)
    else:
        print("Unexpected error")
    sys.exit(2)

_config = {}
_selenium = False

_station = 'EVROPA2'
_url = 'https://www.evropa2.cz'
_interpret_path = '//h3[@class="author"]'
_song_name_path = '//h4[@class="song"]'

_dictionary = {
    'station': _station,
    'web_page': _url,
    'interpret_xpath': _interpret_path,
    'song_xpath': _song_name_path}


def write_last(song):
    station = song[2]
    last_name = ".last_on_%s.json" % (station)
    with open(last_name, 'w') as f:
        json.dump(song, f)


def read_last(station=None):
    try:
        last_name = ".last_on_%s.json" % (station)
        with open(last_name, 'r') as f:
            data = json.load(f)
        return data
    except IOError:
        return []


def make_config(filename=None):
    if not filename:  # fall back to the default config file
        filename = 'config.json'
    with open(filename, 'w') as f:
        json.dump(_dictionary, f)


def read_config(filename):
    try:
        with open(filename, 'r') as f:
            global _config
            _config = json.load(f)
    except EnvironmentError:
        print('bad config file "%s"' % filename)
        sys.exit(2)


def get_time():
    '''What time is it now?'''
    now = _t.localtime()
    date = _t.strftime("%Y_%m_%d", now)
    hour_minute = _t.strftime("%H:%M", now)
    return [date, hour_minute]


def save(args):
    '''We are definitely saving this song.'''
    file_name = "%s_%s_PLAYLIST.txt" % (args[3], args[2])
    string = "%s\t%s - %s\n" % (args[4], args[0], args[1])
    with open(file_name, "a") as myfile:
        myfile.write(string)


def record(*args, **kwargs):
    '''Do we really need to save the current song?'''
    playing = fetch(*args, **kwargs)
    print(playing)
    if playing:  # check before indexing playing[2]; fetch may return []
        current = read_last(playing[2])
        if current != playing:
            save(playing + get_time())
            write_last(playing)
        else:
            # print("[log-%s] not saving %s - %s" % (get_time()[1], current[0], current[1]))
            pass


def fetch(web_page, interpret_xpath, song_xpath, station):
    '''What are they playing?'''
    global _selenium
    if _selenium:
        display = Display(visible=0, size=(800, 600))
        display.start()
        browser = webdriver.Firefox()
        browser.get(web_page)
        try:
            interpret = browser.find_element_by_xpath(interpret_xpath).text
        except:
            interpret = ''
        try:
            song = browser.find_element_by_xpath(song_xpath).text
        except:
            song = ''
        browser.quit()
        display.stop()
        if interpret and song:
            return [interpret, song, station]
        else:
            return ['', '', station]
    else:
        try:
            page = get(web_page)
        except SSLError:
            page = get(web_page, verify=False)
        except ConnectionError:
            print("No internet connection available")
            sys.exit(2)
        tree = html.fromstring(page.content)
        interpret_list = tree.xpath(interpret_xpath)
        song_list = tree.xpath(song_xpath)
        if interpret_list and song_list:
            return [interpret_list[0], song_list[0], station]
        else:
            return []


def job(name):
    print(name)
    record()


def main(argv):
    read_config('config.json')
    global _selenium
    help_string = '''__main__.py -c <config_file.json> \t -or we load default config.json
-h \t\t - help
-s \t\t - use selenium instead of requests (for javascript generated html)'''
    if argv:
        try:
            opts, args = getopt.getopt(argv, "hsc:", ["conf="])
        except getopt.GetoptError:
            print(help_string)
            sys.exit(2)
        for opt, arg in opts:
            if opt == '-h':
                print(help_string)
                sys.exit(2)
            elif opt == '-s':
                _selenium = True
            elif opt in ('-c', '--conf'):
                read_config(arg)
    record(**_config)


if __name__ == '__main__':
    main(sys.argv[1:])
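The requests/lxml path above can be tried in isolation. A minimal sketch, assuming the Evropa2 xpaths above are still valid for the live site; note that a trailing /text() is added here so the query yields plain strings rather than lxml Element objects:

import requests
from lxml import html

page = requests.get('https://www.evropa2.cz')
tree = html.fromstring(page.content)
# /text() yields strings instead of Element objects
interprets = tree.xpath('//h3[@class="author"]/text()')
songs = tree.xpath('//h4[@class="song"]/text()')
print(interprets[:1], songs[:1])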
25.320513
89
0.606076
771
5,925
4.485084
0.324254
0.020243
0.011567
0.010989
0.081839
0.052632
0.039329
0.024292
0.024292
0.024292
0
0.008121
0.272574
5,925
233
90
25.429185
0.7942
0.244388
0
0.2
0
0
0.116886
0.004766
0
0
0
0.004292
0
1
0.071429
false
0.007143
0.071429
0
0.192857
0.064286
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fbb637cd9392b8a2ffe427325fa61c758a9f423
14,341
py
Python
1_ps4/ps4b.py
gyalpodongo/6.0001_psets
b2e12d572d3382921a073e6712a337f98ade7c4a
[ "MIT" ]
null
null
null
1_ps4/ps4b.py
gyalpodongo/6.0001_psets
b2e12d572d3382921a073e6712a337f98ade7c4a
[ "MIT" ]
null
null
null
1_ps4/ps4b.py
gyalpodongo/6.0001_psets
b2e12d572d3382921a073e6712a337f98ade7c4a
[ "MIT" ]
null
null
null
# Problem Set 4B
# Name: Gyalpo Dongo
# Collaborators:
# Time Spent: 9:00
# Late Days Used: 1

import string

### HELPER CODE ###
def load_words(file_name):
    '''
    file_name (string): the name of the file containing
    the list of words to load

    Returns: a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    '''
    print("Loading word list from file...")
    # inFile: file
    inFile = open(file_name, 'r')
    # wordlist: list of strings
    wordlist = []
    for line in inFile:
        wordlist.extend([word.lower() for word in line.split(' ')])
    print("  ", len(wordlist), "words loaded.")
    return wordlist

def is_word(word_list, word):
    '''
    Determines if word is a valid word, ignoring
    capitalization and punctuation

    word_list (list): list of words in the dictionary.
    word (string): a possible word.

    Returns: True if word is in word_list, False otherwise

    Example:
    >>> is_word(word_list, 'bat') returns
    True
    >>> is_word(word_list, 'asdf') returns
    False
    '''
    word = word.lower()
    word = word.strip(" !@#$%^&*()-_+={}[]|\:;'<>?,./\"")
    return word in word_list

def get_story_string():
    """
    Returns: a story in encrypted text.
    """
    f = open("story.txt", "r")
    story = str(f.read())
    f.close()
    return story

def get_digit_shift(input_shift, decrypt):
    '''
    calculate the digit shift based on if decrypting or not

    decrypt: boolean, if decrypting or not

    Returns: digit_shift, the digit shift based on if decrypting or not
    '''
    if decrypt:
        digit_shift = 10 - (26-input_shift)%10
    else:
        digit_shift = input_shift
    return digit_shift

### END HELPER CODE ###

WORDLIST_FILENAME = 'words.txt'

class Message(object):
    def __init__(self, input_text):
        '''
        Initializes a Message object

        input_text (string): the message's text

        a Message object has two attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
        '''
        self.message_text = input_text
        self.valid_words = load_words(WORDLIST_FILENAME)

    def get_message_text(self):
        '''
        Used to safely access self.message_text outside of the class

        Returns: self.message_text
        '''
        return self.message_text

    def get_valid_words(self):
        '''
        Used to safely access a copy of self.valid_words outside of the class.
        This helps you avoid accidentally mutating class attributes.

        Returns: a COPY of self.valid_words
        '''
        return self.valid_words.copy()

    def make_shift_dict(self, input_shift, decrypt=False):  # THINK NEG NUMBERS
        '''
        Creates a dictionary that can be used to apply a cipher to a letter
        and number. The dictionary maps every uppercase and lowercase letter
        to a character shifted down the alphabet by the input shift, as well
        as every number to one shifted down by the same amount. If 'a' is
        shifted down by 2, the result is 'c' and '0' shifted down by 2 is '2'.

        The dictionary should contain 62 keys of all the uppercase letters,
        all the lowercase letters, and all numbers mapped to their shifted
        values.

        input_shift: the amount by which to shift every letter of the
        alphabet and every number (0 <= shift < 26)

        decrypt: if the shift dict will be used for decrypting. affects
        digit shift function

        Returns: a dictionary mapping letter/number (string) to
                 another letter/number (string).
        '''
        dig_shift = get_digit_shift(input_shift, decrypt)
        # gets the new value for the shift in the digits
        dict_shift = {}
        for i in range(len(string.ascii_lowercase)):
            if input_shift > 25:
                new_input_shift = input_shift - 26
            else:
                new_input_shift = input_shift
            if (i+new_input_shift) > 25:
                t = (i+new_input_shift) - 26
                dict_shift[string.ascii_lowercase[i]] = string.ascii_lowercase[t]
            else:
                dict_shift[string.ascii_lowercase[i]] = string.ascii_lowercase[i+new_input_shift]
        for i in range(len(string.ascii_uppercase)):
            if input_shift > 25:
                new_input_shift = input_shift - 26
            else:
                new_input_shift = input_shift
            if (i+new_input_shift) > 25:
                t = (i+new_input_shift) - 26
                dict_shift[string.ascii_uppercase[i]] = string.ascii_uppercase[t]
            else:
                dict_shift[string.ascii_uppercase[i]] = string.ascii_uppercase[i+new_input_shift]
        for i in range(len(string.digits)):
            if dig_shift > 19:
                new_dig_shift = dig_shift - 20
            elif dig_shift > 9:
                new_dig_shift = dig_shift - 10
            else:
                new_dig_shift = dig_shift
            if (i+new_dig_shift) > 9:
                t = (i+new_dig_shift) - 10
                dict_shift[string.digits[i]] = string.digits[t]
            else:
                dict_shift[string.digits[i]] = string.digits[i+new_dig_shift]
        return dict_shift

    def apply_shift(self, shift_dict):
        '''
        Applies the Caesar Cipher to self.message_text with the shift
        specified in shift_dict. Creates a new string that is
        self.message_text, shifted down by some number of characters,
        determined by the shift value that shift_dict was built with.

        shift_dict: a dictionary with 62 keys, mapping lowercase and
        uppercase letters and numbers to their new letters (as built by
        make_shift_dict)

        Returns: the message text (string) with every letter/number shifted
        using the input shift_dict
        '''
        new_str = ""
        for i in self.get_message_text():
            if str(i) in shift_dict:
                # if str(i) is any of the keys in the dictionary, then
                # its shifted value will be added to new_str
                new_str += shift_dict[str(i)]
            else:
                new_str += str(i)
                # this is for when it is punctuation, other symbols or
                # spaces, so that they are not modified, as the problem
                # specified
        return new_str

class PlaintextMessage(Message):
    def __init__(self, input_text, input_shift):
        '''
        Initializes a PlaintextMessage object.

        input_text (string): the message's text
        input_shift: the shift associated with this message

        A PlaintextMessage object inherits from Message. It has five attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
            self.shift (integer, determined by input shift)
            self.encryption_dict (dictionary, built using the shift)
            self.encrypted_message_text (string, encrypted using self.encryption_dict)
        '''
        Message.__init__(self, input_text)
        self.shift = input_shift
        self.encryption_dict = self.make_shift_dict(self.shift)
        self.encrypted_message_text = self.apply_shift(self.encryption_dict)

    def get_shift(self):
        '''
        Used to safely access self.shift outside of the class

        Returns: self.shift
        '''
        return self.shift

    def get_encryption_dict(self):
        '''
        Used to safely access a copy of self.encryption_dict outside of the class

        Returns: a COPY of self.encryption_dict
        '''
        return self.encryption_dict.copy()

    def get_encrypted_message_text(self):
        '''
        Used to safely access self.encrypted_message_text outside of the class

        Returns: self.encrypted_message_text
        '''
        return self.encrypted_message_text

    def modify_shift(self, input_shift):
        '''
        Changes self.shift of the PlaintextMessage, and updates any other
        attributes that are determined by the shift.

        input_shift: an integer, the new shift that should be associated
        with this message. [0 <= shift < 26]

        Returns: nothing
        '''
        self.__init__(self.message_text, input_shift)
        self.shift = input_shift

class EncryptedMessage(Message):
    def __init__(self, input_text):
        '''
        Initializes an EncryptedMessage object

        input_text (string): the message's text

        an EncryptedMessage object inherits from Message. It has two attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
        '''
        Message.__init__(self, input_text)

    def decrypt_message(self):
        '''
        Decrypts self.message_text by trying every possible shift value and
        finding the "best" one.

        We will define "best" as the shift that creates the max number of
        valid English words when we use apply_shift(shift) on the message
        text. If a is the original shift value used to encrypt the message,
        then we would expect (26 - a) to be the value found for decrypting it.

        Note: if shifts are equally good, such that they all create the max
        number of valid words, you may choose any of those shifts (and their
        corresponding decrypted messages) to return.

        Returns: a tuple of the best shift value used to originally encrypt
        the message (a) and the decrypted message text using that shift value
        '''
        input_scores = {}
        # a dictionary with the different shifts as keys; the value for
        # each key is a tuple of the respective score (the number of valid
        # words found after applying the shift) and the text with that
        # shift applied
        list_scores = []
        list_tuples = []
        # a list holds the (best_shift, text) tuples, as there can be
        # several of them
        for i in range(26):
            # range 26 because that is the maximum shift
            t = 0
            # t counts the number of valid words in the decrypted text
            shift_dict = self.make_shift_dict(26 - i, True).copy()
            shift_text = self.apply_shift(shift_dict)
            valid_words_list = self.valid_words.copy()
            for b in valid_words_list:
                if b in shift_text.lower():
                    t += 1
            input_scores[i] = (t, shift_text)
            list_scores.append(t)
        for i in input_scores:
            if input_scores[i][0] == max(list_scores):
                list_tuples.append((i, input_scores[i][1]))
        import random
        # if several shifts tie for the max number of valid words, the
        # problem statement says any of them may be returned, so choose
        # one at random
        return random.choice(list_tuples)

def test_plaintext_message():
    '''
    Write two test cases for the PlaintextMessage class here.
    Each one should handle different cases (see handout for more details.)
    Write a comment above each test explaining what case(s) it is testing.
    '''
    # Testing for numbers
    plaintext1 = PlaintextMessage("231.45", 2)
    print('Expected Output: 453.67')
    print('Actual Output:', plaintext1.get_encrypted_message_text())

    # Testing for capitals and numbers
    plaintext1 = PlaintextMessage("HeLLo 23.21", 3)
    print('Expected Output: KhOOr 56.54')
    print('Actual Output:', plaintext1.get_encrypted_message_text())

    # #### Example test case (PlaintextMessage) #####
    # # This test is checking encoding a lowercase string with punctuation in it.
    # plaintext = PlaintextMessage('hello!', 2)
    # print('Expected Output: jgnnq!')
    # print('Actual Output:', plaintext.get_encrypted_message_text())

def test_encrypted_message():
    '''
    Write two test cases for the EncryptedMessage class here.
    Each one should handle different cases (see handout for more details.)
    Write a comment above each test explaining what case(s) it is testing.
    '''
    # #### Example test case (EncryptedMessage) #####
    # # This test is checking decoding a lowercase string with punctuation in it.
    # encrypted = EncryptedMessage('jgnnq!')
    # print('Expected Output:', (2, 'hello!'))
    # print('Actual Output:', encrypted.decrypt_message())

    # Testing for capital letters and lowercase
    encrypted1 = EncryptedMessage('EQORwVGT')
    print('Expected Output:', (2, 'COMPuTER'))
    print('Actual Output:', encrypted1.decrypt_message())

    # Testing for capitals, letters, punctuation and numbers
    encrypted2 = EncryptedMessage('Jgnnq42!')
    print('Expected Output:', (2, 'Hello21!'))
    print('Actual Output:', encrypted2.decrypt_message())

def decode_story():
    '''
    Write your code here to decode the story contained in the file story.txt.
    Hint: use the helper function get_story_string and your EncryptedMessage class.

    Returns: a tuple containing (best_shift, decoded_story)
    '''
    encrypted = EncryptedMessage(get_story_string())
    return encrypted.decrypt_message()

if __name__ == '__main__':
    # Uncomment these lines to try running your test cases
    test_plaintext_message()
    test_encrypted_message()

    # Uncomment these lines to try running decode_story()
    best_shift, story = decode_story()
    print("Best shift:", best_shift)
    print("Decoded story: ", story)
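For contrast, the wrap-around branching in make_shift_dict can be expressed with modular arithmetic. This is a sketch of an equivalent mapping, not the problem set's required approach:

import string

def make_shift_dict_compact(shift, decrypt=False):
    # same digit-shift rule as get_digit_shift above, normalized mod 10
    dig = (10 - (26 - shift) % 10) % 10 if decrypt else shift % 10
    lower = {c: string.ascii_lowercase[(i + shift) % 26]
             for i, c in enumerate(string.ascii_lowercase)}
    upper = {c: string.ascii_uppercase[(i + shift) % 26]
             for i, c in enumerate(string.ascii_uppercase)}
    digits = {c: string.digits[(i + dig) % 10]
              for i, c in enumerate(string.digits)}
    # 62 keys total: 26 lowercase + 26 uppercase + 10 digits
    return {**lower, **upper, **digits}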
36.214646
98
0.619064
1,834
14,341
4.690294
0.173391
0.037201
0.019182
0.009765
0.296908
0.255754
0.208207
0.185189
0.13927
0.104394
0
0.010245
0.305767
14,341
395
99
36.306329
0.853757
0.484206
0
0.192857
0
0
0.053052
0.004989
0
0
0
0.002532
0
1
0.135714
false
0
0.014286
0
0.271429
0.085714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fbe01d48c418a25dac0b1a8cdfdd4ff5a631b60
13,996
py
Python
tests/integration/cartography/intel/gcp/test_compute.py
sckevmit/cartography
fefb63b5ec97986dcc29038331d0e5b027b95d5f
[ "Apache-2.0" ]
2,322
2019-03-02T01:07:20.000Z
2022-03-31T20:39:12.000Z
tests/integration/cartography/intel/gcp/test_compute.py
sckevmit/cartography
fefb63b5ec97986dcc29038331d0e5b027b95d5f
[ "Apache-2.0" ]
462
2019-03-07T18:38:11.000Z
2022-03-31T14:55:20.000Z
tests/integration/cartography/intel/gcp/test_compute.py
sckevmit/cartography
fefb63b5ec97986dcc29038331d0e5b027b95d5f
[ "Apache-2.0" ]
246
2019-03-03T02:39:23.000Z
2022-02-24T09:46:38.000Z
import cartography.intel.gcp.compute
import tests.data.gcp.compute

TEST_UPDATE_TAG = 123456789


def _ensure_local_neo4j_has_test_instance_data(neo4j_session):
    cartography.intel.gcp.compute.load_gcp_instances(
        neo4j_session,
        tests.data.gcp.compute.TRANSFORMED_GCP_INSTANCES,
        TEST_UPDATE_TAG,
    )


def _ensure_local_neo4j_has_test_vpc_data(neo4j_session):
    cartography.intel.gcp.compute.load_gcp_vpcs(
        neo4j_session,
        tests.data.gcp.compute.TRANSFORMED_GCP_VPCS,
        TEST_UPDATE_TAG,
    )


def _ensure_local_neo4j_has_test_subnet_data(neo4j_session):
    cartography.intel.gcp.compute.load_gcp_subnets(
        neo4j_session,
        tests.data.gcp.compute.TRANSFORMED_GCP_SUBNETS,
        TEST_UPDATE_TAG,
    )


def _ensure_local_neo4j_has_test_firewall_data(neo4j_session):
    cartography.intel.gcp.compute.load_gcp_ingress_firewalls(
        neo4j_session,
        tests.data.gcp.compute.TRANSFORMED_FW_LIST,
        TEST_UPDATE_TAG,
    )


def test_transform_and_load_vpcs(neo4j_session):
    """
    Test that we can correctly transform and load VPC nodes to Neo4j.
    """
    vpc_res = tests.data.gcp.compute.VPC_RESPONSE
    vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)
    cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)

    query = """
    MATCH(vpc:GCPVpc{id:{VpcId}})
    RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks
    """
    expected_vpc_id = 'projects/project-abc/global/networks/default'
    nodes = neo4j_session.run(
        query,
        VpcId=expected_vpc_id,
    )
    actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}
    expected_nodes = {
        (expected_vpc_id, expected_vpc_id, True),
    }
    assert actual_nodes == expected_nodes


def test_transform_and_load_subnets(neo4j_session):
    """
    Ensure we can transform and load subnets.
    """
    subnet_res = tests.data.gcp.compute.VPC_SUBNET_RESPONSE
    subnet_list = cartography.intel.gcp.compute.transform_gcp_subnets(subnet_res)
    cartography.intel.gcp.compute.load_gcp_subnets(neo4j_session, subnet_list, TEST_UPDATE_TAG)

    query = """
    MATCH(subnet:GCPSubnet)
    RETURN subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,
           subnet.private_ip_google_access, subnet.vpc_partial_uri
    """
    nodes = neo4j_session.run(query)
    actual_nodes = {
        (
            n['subnet.id'],
            n['subnet.region'],
            n['subnet.gateway_address'],
            n['subnet.ip_cidr_range'],
            n['subnet.private_ip_google_access'],
            n['subnet.vpc_partial_uri'],
        )
        for n in nodes
    }
    expected_nodes = {
        (
            'projects/project-abc/regions/europe-west2/subnetworks/default',
            'europe-west2',
            '10.0.0.1',
            '10.0.0.0/20',
            False,
            'projects/project-abc/global/networks/default',
        ),
    }
    assert actual_nodes == expected_nodes


def test_transform_and_load_gcp_forwarding_rules(neo4j_session):
    """
    Ensure that we can correctly transform and load GCP Forwarding Rules
    """
    fwd_res = tests.data.gcp.compute.LIST_FORWARDING_RULES_RESPONSE
    fwd_list = cartography.intel.gcp.compute.transform_gcp_forwarding_rules(fwd_res)
    cartography.intel.gcp.compute.load_gcp_forwarding_rules(neo4j_session, fwd_list, TEST_UPDATE_TAG)

    fwd_query = """
    MATCH(f:GCPForwardingRule)
    RETURN f.id, f.partial_uri, f.ip_address, f.ip_protocol, f.load_balancing_scheme, f.name, f.network,
           f.port_range, f.ports, f.project_id, f.region, f.self_link, f.subnetwork, f.target
    """
    objects = neo4j_session.run(fwd_query)
    actual_nodes = {
        (
            o['f.id'],
            o['f.ip_address'],
            o['f.ip_protocol'],
            o['f.load_balancing_scheme'],
            o['f.name'],
            o.get('f.port_range', None),
            ','.join(o.get('f.ports', None)) if o.get('f.ports', None) else None,
            o['f.project_id'],
            o['f.region'],
            o['f.target'],
        )
        for o in objects
    }
    expected_nodes = {
        (
            'projects/project-abc/regions/europe-west2/forwardingRules/internal-service-1111',
            '10.0.0.10',
            'TCP',
            'INTERNAL',
            'internal-service-1111',
            None,
            '80',
            'project-abc',
            'europe-west2',
            'projects/project-abc/regions/europe-west2/targetPools/node-pool-12345',
        ),
        (
            'projects/project-abc/regions/europe-west2/forwardingRules/public-ingress-controller-1234567',
            '1.2.3.11',
            'TCP',
            'EXTERNAL',
            'public-ingress-controller-1234567',
            '80-443',
            None,
            'project-abc',
            'europe-west2',
            'projects/project-abc/regions/europe-west2/targetVpnGateways/vpn-12345',
        ),
        (
            'projects/project-abc/regions/europe-west2/forwardingRules/shard-server-22222',
            '10.0.0.20',
            'TCP',
            'INTERNAL',
            'shard-server-22222',
            None,
            '10203',
            'project-abc',
            'europe-west2',
            'projects/project-abc/regions/europe-west2/targetPools/node-pool-234567',
        ),
    }
    assert actual_nodes == expected_nodes


def test_transform_and_load_gcp_instances_and_nics(neo4j_session):
    """
    Ensure that we can correctly transform and load GCP instances.
    """
    instance_responses = [tests.data.gcp.compute.GCP_LIST_INSTANCES_RESPONSE]
    instance_list = cartography.intel.gcp.compute.transform_gcp_instances(instance_responses)
    cartography.intel.gcp.compute.load_gcp_instances(neo4j_session, instance_list, TEST_UPDATE_TAG)

    instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'
    instance_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1'

    nic_query = """
    MATCH(i:GCPInstance)-[r:NETWORK_INTERFACE]->(nic:GCPNetworkInterface)
    OPTIONAL MATCH (i)-[:TAGGED]->(t:GCPNetworkTag)
    RETURN i.id, i.zone_name, i.project_id, i.hostname, t.value, r.lastupdated, nic.nic_id, nic.private_ip
    """
    objects = neo4j_session.run(nic_query)
    actual_nodes = {
        (
            o['i.id'],
            o['i.zone_name'],
            o['i.project_id'],
            o['nic.nic_id'],
            o['nic.private_ip'],
            o['t.value'],
            o['r.lastupdated'],
        )
        for o in objects
    }
    expected_nodes = {
        (
            instance_id1,
            'europe-west2-b',
            'project-abc',
            'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',
            '10.0.0.3',
            None,
            TEST_UPDATE_TAG,
        ),
        (
            instance_id2,
            'europe-west2-b',
            'project-abc',
            'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0',
            '10.0.0.2',
            'test',
            TEST_UPDATE_TAG,
        ),
    }
    assert actual_nodes == expected_nodes


def test_transform_and_load_firewalls(neo4j_session):
    """
    Ensure we can correctly transform and load GCP firewalls
    :param neo4j_session:
    :return:
    """
    fw_list = cartography.intel.gcp.compute.transform_gcp_firewall(tests.data.gcp.compute.LIST_FIREWALLS_RESPONSE)
    cartography.intel.gcp.compute.load_gcp_ingress_firewalls(neo4j_session, fw_list, TEST_UPDATE_TAG)

    query = """
    MATCH (vpc:GCPVpc)-[r:RESOURCE]->(fw:GCPFirewall)
    return vpc.id, fw.id, fw.has_target_service_accounts
    """
    nodes = neo4j_session.run(query)
    actual_nodes = {
        (
            n['vpc.id'],
            n['fw.id'],
            n['fw.has_target_service_accounts'],
        )
        for n in nodes
    }
    expected_nodes = {
        (
            'projects/project-abc/global/networks/default',
            'projects/project-abc/global/firewalls/default-allow-icmp',
            False,
        ),
        (
            'projects/project-abc/global/networks/default',
            'projects/project-abc/global/firewalls/default-allow-internal',
            False,
        ),
        (
            'projects/project-abc/global/networks/default',
            'projects/project-abc/global/firewalls/default-allow-rdp',
            False,
        ),
        (
            'projects/project-abc/global/networks/default',
            'projects/project-abc/global/firewalls/default-allow-ssh',
            False,
        ),
        (
            'projects/project-abc/global/networks/default',
            'projects/project-abc/global/firewalls/custom-port-incoming',
            False,
        ),
    }
    assert actual_nodes == expected_nodes


def test_vpc_to_subnets(neo4j_session):
    """
    Ensure that subnets are connected to VPCs.
    """
    _ensure_local_neo4j_has_test_vpc_data(neo4j_session)
    _ensure_local_neo4j_has_test_subnet_data(neo4j_session)

    query = """
    MATCH(vpc:GCPVpc{id:{VpcId}})-[:RESOURCE]->(subnet:GCPSubnet)
    RETURN vpc.id, subnet.id, subnet.region, subnet.gateway_address, subnet.ip_cidr_range,
           subnet.private_ip_google_access
    """
    expected_vpc_id = 'projects/project-abc/global/networks/default'
    nodes = neo4j_session.run(
        query,
        VpcId=expected_vpc_id,
    )
    actual_nodes = {
        (
            n['vpc.id'],
            n['subnet.id'],
            n['subnet.region'],
            n['subnet.gateway_address'],
            n['subnet.ip_cidr_range'],
            n['subnet.private_ip_google_access'],
        )
        for n in nodes
    }
    expected_nodes = {
        (
            'projects/project-abc/global/networks/default',
            'projects/project-abc/regions/europe-west2/subnetworks/default',
            'europe-west2',
            '10.0.0.1',
            '10.0.0.0/20',
            False,
        ),
    }
    assert actual_nodes == expected_nodes


def test_nics_to_access_configs(neo4j_session):
    """
    Ensure that network interfaces and access configs are attached
    """
    _ensure_local_neo4j_has_test_instance_data(neo4j_session)
    ac_query = """
    MATCH (nic:GCPNetworkInterface)-[r:RESOURCE]->(ac:GCPNicAccessConfig)
    return nic.nic_id, ac.access_config_id, ac.public_ip
    """
    nodes = neo4j_session.run(ac_query)

    nic_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0'
    ac_id1 = f"{nic_id1}/accessconfigs/ONE_TO_ONE_NAT"
    nic_id2 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1/networkinterfaces/nic0'
    ac_id2 = f"{nic_id2}/accessconfigs/ONE_TO_ONE_NAT"

    actual_nodes = {(n['nic.nic_id'], n['ac.access_config_id'], n['ac.public_ip']) for n in nodes}
    expected_nodes = {
        (nic_id1, ac_id1, '1.3.4.5'),
        (nic_id2, ac_id2, '1.2.3.4'),
    }
    assert actual_nodes == expected_nodes


def test_nic_to_subnets(neo4j_session):
    """
    Ensure that network interfaces are attached to subnets
    """
    _ensure_local_neo4j_has_test_subnet_data(neo4j_session)
    _ensure_local_neo4j_has_test_instance_data(neo4j_session)
    subnet_query = """
    MATCH (nic:GCPNetworkInterface{id:{NicId}})-[:PART_OF_SUBNET]->(subnet:GCPSubnet)
    return nic.nic_id, nic.private_ip, subnet.id, subnet.gateway_address, subnet.ip_cidr_range
    """
    nodes = neo4j_session.run(
        subnet_query,
        NicId='projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',
    )
    actual_nodes = {
        (
            n['nic.nic_id'],
            n['nic.private_ip'],
            n['subnet.id'],
            n['subnet.gateway_address'],
            n['subnet.ip_cidr_range'],
        )
        for n in nodes
    }
    expected_nodes = {(
        'projects/project-abc/zones/europe-west2-b/instances/instance-1-test/networkinterfaces/nic0',
        '10.0.0.3',
        'projects/project-abc/regions/europe-west2/subnetworks/default',
        '10.0.0.1',
        '10.0.0.0/20',
    )}
    assert actual_nodes == expected_nodes


def test_instance_to_vpc(neo4j_session):
    _ensure_local_neo4j_has_test_vpc_data(neo4j_session)
    _ensure_local_neo4j_has_test_subnet_data(neo4j_session)
    _ensure_local_neo4j_has_test_instance_data(neo4j_session)
    instance_id1 = 'projects/project-abc/zones/europe-west2-b/instances/instance-1-test'

    query = """
    MATCH (i:GCPInstance{id:{InstanceId}})-[r:MEMBER_OF_GCP_VPC]->(v:GCPVpc)
    RETURN i.id, v.id
    """
    nodes = neo4j_session.run(
        query,
        InstanceId=instance_id1,
    )
    actual_nodes = {
        (
            n['i.id'],
            n['v.id'],
        )
        for n in nodes
    }
    expected_nodes = {(
        instance_id1,
        'projects/project-abc/global/networks/default',
    )}
    assert actual_nodes == expected_nodes


def test_vpc_to_firewall_to_iprule_to_iprange(neo4j_session):
    _ensure_local_neo4j_has_test_vpc_data(neo4j_session)
    _ensure_local_neo4j_has_test_firewall_data(neo4j_session)
    query = """
    MATCH (rng:IpRange{id:'0.0.0.0/0'})-[m:MEMBER_OF_IP_RULE]->(rule:IpRule{fromport:22})
          -[a:ALLOWED_BY]->(fw:GCPFirewall)<-[r:RESOURCE]-(vpc:GCPVpc)
    RETURN rng.id, rule.id, fw.id, fw.priority, vpc.id
    """
    nodes = neo4j_session.run(query)
    actual_nodes = {
        (
            n['rng.id'],
            n['rule.id'],
            n['fw.id'],
            n['vpc.id'],
        )
        for n in nodes
    }
    expected_nodes = {(
        '0.0.0.0/0',
        'projects/project-abc/global/firewalls/default-allow-ssh/allow/22tcp',
        'projects/project-abc/global/firewalls/default-allow-ssh',
        'projects/project-abc/global/networks/default',
    )}
    assert actual_nodes == expected_nodes
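Every test above takes a neo4j_session argument. A hypothetical conftest.py fixture like the following could supply it against a local Neo4j instance; cartography's actual integration harness may provision the session differently:

import pytest
from neo4j import GraphDatabase

@pytest.fixture
def neo4j_session():
    # assumes an unauthenticated Neo4j listening on the default bolt port
    driver = GraphDatabase.driver("bolt://localhost:7687")
    with driver.session() as session:
        yield session
    driver.close()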
32.85446
118
0.627894
1,720
13,996
4.84593
0.112791
0.063347
0.077744
0.05183
0.716857
0.673905
0.620156
0.55189
0.475465
0.384763
0
0.026151
0.248642
13,996
425
119
32.931765
0.766451
0.034867
0
0.490141
0
0.042254
0.378952
0.2628
0
0
0
0
0.028169
1
0.039437
false
0
0.005634
0
0.053521
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fc21aa494251b943ab4e4b535ca093a791a6af8
6,208
py
Python
gae/backend/services/slack/slack.py
jlapenna/bikebuds
6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8
[ "Apache-2.0" ]
9
2018-11-17T00:53:47.000Z
2021-03-16T05:18:01.000Z
gae/backend/services/slack/slack.py
jlapenna/bikebuds
6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8
[ "Apache-2.0" ]
8
2018-11-28T17:19:07.000Z
2022-02-26T17:46:09.000Z
gae/backend/services/slack/slack.py
jlapenna/bikebuds
6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import re
import urllib
import urllib.request

import flask
from google.cloud.datastore.entity import Entity
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError

from shared import responses
from shared import task_util
from shared.datastore.bot import Bot
from shared.datastore.service import Service
from shared.services.slack.installation_store import DatastoreInstallationStore
from shared.services.strava.client import ClientWrapper

from services.slack.track_blocks import create_track_blocks
from services.slack.unfurl_activity import unfurl_activity
from services.slack.unfurl_route import unfurl_route
from shared import ds_util
from shared.config import config

_STRAVA_APP_LINK_REGEX = re.compile('(https://www.strava.com/([^/]+)/[0-9]+)')

_TRACKS_TEAM_ID = 'T01U8EC3H8T'
_TRACKS_CHANNEL_ID = 'C020755FX3L'

_DEV_TRACKS_TEAM_ID = 'T01U4PCGSQM'
_DEV_TRACKS_CHANNEL_ID = 'C01U82F2STD'

module = flask.Blueprint('slack', __name__)


@module.route('/tasks/event', methods=['POST'])
def tasks_event():
    params = task_util.get_payload(flask.request)
    event = params['event']
    logging.info('SlackEvent: %s', event.key)
    if event['event']['type'] == 'link_shared':
        return _process_link_shared(event)
    return responses.OK_SUB_EVENT_UNKNOWN


@module.route('/tasks/livetrack', methods=['POST'])
def tasks_livetrack():
    params = task_util.get_payload(flask.request)
    track = params['track']
    logging.info('process/livetrack: %s', track)
    return _process_track(track)


def _process_link_shared(event):
    slack_client = _create_slack_client(event)
    unfurls = _create_unfurls(event)
    if not unfurls:
        return responses.OK_NO_UNFURLS

    try:
        response = slack_client.chat_unfurl(
            channel=event['event']['channel'],
            ts=event['event']['message_ts'],
            unfurls=unfurls,
        )
    except SlackApiError:
        logging.exception('process_link_shared: failed: unfurling: %s', unfurls)
        return responses.INTERNAL_SERVER_ERROR

    if not response['ok']:
        logging.error('process_link_shared: failed: %s with %s', response, unfurls)
        return responses.INTERNAL_SERVER_ERROR

    logging.debug('process_link_shared: %s', response)
    return responses.OK


def _create_slack_client(event):
    slack_service = Service.get('slack', parent=Bot.key())
    installation_store = DatastoreInstallationStore(
        ds_util.client, parent=slack_service.key
    )
    slack_bot = installation_store.find_bot(
        enterprise_id=event.get('authorizations', [{}])[0].get('enterprise_id'),
        team_id=event.get('authorizations', [{}])[0].get('team_id'),
        is_enterprise_install=event.get('authorizations', [{}])[0].get(
            'is_enterprise_install'
        ),
    )
    return WebClient(slack_bot.bot_token)


def _create_slack_client_for_team(team_id):
    slack_service = Service.get('slack', parent=Bot.key())
    installation_store = DatastoreInstallationStore(
        ds_util.client, parent=slack_service.key
    )
    slack_bot = installation_store.find_bot(
        enterprise_id=None,
        team_id=team_id,
        is_enterprise_install=False,
    )
    return WebClient(slack_bot.bot_token)


def _create_unfurls(event):
    strava = Service.get('strava', parent=Bot.key())
    strava_client = ClientWrapper(strava)

    unfurls = {}
    for link in event['event']['links']:
        alt_url = _resolve_rewrite_link(link)
        unfurl = _unfurl(strava_client, link, alt_url)
        if unfurl:
            unfurls[link['url']] = unfurl
    logging.warning(f'_create_unfurls: {unfurls}')
    return unfurls


def _resolve_rewrite_link(link):
    if 'strava.app.link' not in link['url']:
        return
    try:
        logging.info('_resolve_rewrite_link: fetching: %s', link['url'])
        with urllib.request.urlopen(link['url']) as response:
            contents = response.read()
        logging.debug('_resolve_rewrite_link: fetched: %s', link['url'])
    except urllib.request.HTTPError:
        logging.exception('Could not fetch %s', link['url'])
        return
    match = _STRAVA_APP_LINK_REGEX.search(str(contents))
    if match is None:
        logging.warning('Could not resolve %s', link['url'])
        return
    resolved_url = match.group()
    return resolved_url


def _unfurl(strava_client, link, alt_url=None):
    url = alt_url if alt_url else link['url']
    if '/routes/' in url:
        return unfurl_route(strava_client, url)
    elif '/activities/' in url:
        return unfurl_activity(strava_client, url)
    else:
        return None


def _process_track(track: Entity) -> responses.Response:
    if config.is_dev:
        team_id = _DEV_TRACKS_TEAM_ID
        channel_id = _DEV_TRACKS_CHANNEL_ID
    else:
        team_id = _TRACKS_TEAM_ID
        channel_id = _TRACKS_CHANNEL_ID
    slack_client = _create_slack_client_for_team(team_id)
    blocks = create_track_blocks(track)
    if not blocks:
        return responses.OK_INVALID_LIVETRACK

    try:
        response = slack_client.chat_postMessage(
            channel=channel_id, blocks=blocks, unfurl_links=False, unfurl_media=False
        )
    except SlackApiError:
        logging.exception(f'process_track: failed: track: {track}, blocks: {blocks}')
        return responses.INTERNAL_SERVER_ERROR

    if not response['ok']:
        logging.error(
            f'process_track: failed: response: {response}, track: {track}, blocks: {blocks}'
        )
        return responses.INTERNAL_SERVER_ERROR

    logging.debug('process_track: %s', response)
    return responses.OK
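For reference, a hedged sketch of the payload shape chat_unfurl consumes: a dict keyed by each shared URL, each value a Block Kit attachment. The token, channel, ts and URL below are placeholders, and the block content is illustrative rather than what unfurl_route actually builds:

from slack_sdk import WebClient

client = WebClient(token="xoxb-placeholder")
unfurls = {
    "https://www.strava.com/routes/123": {
        "blocks": [
            {"type": "section",
             "text": {"type": "mrkdwn", "text": "*Route:* Sample ride"}},
        ]
    }
}
# ts identifies the message the links appeared in
client.chat_unfurl(channel="C123", ts="1700000000.000100", unfurls=unfurls)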
32.846561
92
0.704897
789
6,208
5.301648
0.235741
0.017213
0.02032
0.027731
0.282333
0.219938
0.190294
0.160172
0.123835
0.105188
0
0.006578
0.191849
6,208
188
93
33.021277
0.827188
0.088273
0
0.20979
0
0
0.137088
0.011513
0
0
0
0
0
1
0.062937
false
0
0.132867
0
0.342657
0.006993
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fc244ac9c29079630ffd294e5609b1a6c46e1ff
3,895
py
Python
ooobuild/lo/drawing/framework/tab_bar_button.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
ooobuild/lo/drawing/framework/tab_bar_button.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
ooobuild/lo/drawing/framework/tab_bar_button.py
Amourspirit/ooo_uno_tmpl
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
[ "Apache-2.0" ]
null
null
null
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is an auto-generated file, generated by Cheetah
# Namespace: com.sun.star.drawing.framework
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
from .x_resource_id import XResourceId as XResourceId_5be3103d


class TabBarButton(object):
    """
    Struct Class

    Descriptor of a tab bar button.

    Tab bar buttons are typically used to offer the user the choice between
    different views to be displayed in one pane.

    For identification only the ResourceId is used, so for some methods of
    the XTabBar interface only the ResourceId member is evaluated.

    See Also:
        `API TabBarButton <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1drawing_1_1framework_1_1TabBarButton.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.drawing.framework'
    __ooo_full_ns__: str = 'com.sun.star.drawing.framework.TabBarButton'
    __ooo_type_name__: str = 'struct'
    typeName: str = 'com.sun.star.drawing.framework.TabBarButton'
    """Literal Constant ``com.sun.star.drawing.framework.TabBarButton``"""

    def __init__(self, ButtonLabel: typing.Optional[str] = '', HelpText: typing.Optional[str] = '', ResourceId: typing.Optional[XResourceId_5be3103d] = None) -> None:
        """
        Constructor

        Arguments:
            ButtonLabel (str, optional): ButtonLabel value.
            HelpText (str, optional): HelpText value.
            ResourceId (XResourceId, optional): ResourceId value.
        """
        super().__init__()

        if isinstance(ButtonLabel, TabBarButton):
            oth: TabBarButton = ButtonLabel
            self.ButtonLabel = oth.ButtonLabel
            self.HelpText = oth.HelpText
            self.ResourceId = oth.ResourceId
            return

        kargs = {
            "ButtonLabel": ButtonLabel,
            "HelpText": HelpText,
            "ResourceId": ResourceId,
        }
        self._init(**kargs)

    def _init(self, **kwargs) -> None:
        self._button_label = kwargs["ButtonLabel"]
        self._help_text = kwargs["HelpText"]
        self._resource_id = kwargs["ResourceId"]

    @property
    def ButtonLabel(self) -> str:
        """
        This label is displayed on the UI as button text.

        The label is expected to be localized.
        """
        return self._button_label

    @ButtonLabel.setter
    def ButtonLabel(self, value: str) -> None:
        self._button_label = value

    @property
    def HelpText(self) -> str:
        """
        The localized help text that may be displayed in a tool tip.
        """
        return self._help_text

    @HelpText.setter
    def HelpText(self, value: str) -> None:
        self._help_text = value

    @property
    def ResourceId(self) -> XResourceId_5be3103d:
        """
        XResourceId object of the resource that is requested to be displayed
        when the tab bar button is activated.

        For some methods of the XTabBar interface only this member is
        evaluated. That is because only this member is used to identify a
        tab bar button.
        """
        return self._resource_id

    @ResourceId.setter
    def ResourceId(self, value: XResourceId_5be3103d) -> None:
        self._resource_id = value


__all__ = ['TabBarButton']
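A minimal construction sketch for the struct above; in practice an XResourceId instance comes from the running LibreOffice framework rather than being built by hand, so ResourceId is left at its default here:

button = TabBarButton(ButtonLabel='Outline', HelpText='Switch to outline view')
print(button.ButtonLabel, button.HelpText, button.ResourceId)  # Outline Switch to outline view None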
33.869565
166
0.668293
478
3,895
5.303347
0.382845
0.023669
0.019724
0.033531
0.11716
0.091124
0.076134
0.030769
0
0
0
0.013979
0.246983
3,895
114
167
34.166667
0.850324
0.44647
0
0.066667
0
0
0.102839
0.062132
0
0
0
0
0
1
0.177778
false
0
0.066667
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fc41c98a94f4ecb65c5c9b1a3aac7dc614e2662
5,087
py
Python
shared/tools/snapshot/utils.py
DougMahoney/metatools
112340102962ff0c3e323564357cc4e848939cf7
[ "Apache-2.0" ]
12
2020-04-10T07:09:24.000Z
2022-03-04T09:22:40.000Z
shared/tools/snapshot/utils.py
DougMahoney/metatools
112340102962ff0c3e323564357cc4e848939cf7
[ "Apache-2.0" ]
5
2020-05-16T18:22:23.000Z
2022-03-29T13:19:27.000Z
shared/tools/snapshot/utils.py
DougMahoney/metatools
112340102962ff0c3e323564357cc4e848939cf7
[ "Apache-2.0" ]
2
2020-12-10T15:17:40.000Z
2021-12-02T17:34:56.000Z
""" Extraction utilities and supporting functions Some operations are used frequently or repeated enough to be factored out. Note that SQL can be used via the POORSQL_BINARY_PATH Download the binary from http://architectshack.com/PoorMansTSqlFormatter.ashx It's a phenominal utility that brilliantly normalizes SQL code. Have friends/coworkers/peers who missed an indent? This will prevent a diff utility from tripping up on that. """ from shared.tools.yaml.core import dump from java.util import Date # Taken from the Metatools library, copied here for convenience def getDesignerContext(anchor=None): """Attempts to grab the Ignition designer context. This is most easily done with a Vision object, like a window. If no object is provided as a starting point, it will attempt to get one from the designer context. """ from com.inductiveautomation.ignition.designer import IgnitionDesigner if anchor is None: try: return IgnitionDesigner.getFrame().getContext() except: for windowName in system.gui.getWindowNames(): try: anchor = system.gui.getWindow(windowName) break except: pass else: raise LookupError("No open windows were found, so no context was derived by default.") try: anchor = anchor.source except AttributeError: pass # Just making sure we've a live object in the tree, not just an event object for i in range(50): if anchor.parent is None: break else: anchor = anchor.parent if isinstance(anchor,IgnitionDesigner): break else: raise RuntimeError("No Designer Context found in this object's heirarchy") context = anchor.getContext() return context POORSQL_BINARY_PATH = 'C:/Workspace/bin/SqlFormatter.exe' # from https://stackoverflow.com/a/165662/13229100 from subprocess import Popen, PIPE, STDOUT def format_sql(raw_sql): """Normalize the SQL so it is consistent for diffing""" try: raise KeyboardInterrupt poorsql = Popen( [POORSQL_BINARY_PATH, ], stdout=PIPE, stdin=PIPE, stderr=STDOUT) formatted = poorsql.communicate(input=raw_sql)[0] return formatted.replace('\r\n', '\n').strip() except: return raw_sql import java.awt.Point, java.awt.Dimension, java.util.UUID BASE_TYPES = set([bool, float, int, long, None, str, unicode]) COERSION_MAP = { java.awt.Point: lambda v: {'x': v.getX(), 'y': v.getY()}, java.awt.Dimension: lambda v: {'width': v.getWidth(), 'height': v.getHeight()}, java.util.UUID: lambda v: str(v), } def coerceValue(value, default=str): if type(value) in BASE_TYPES: return value else: return COERSION_MAP.get(type(value), default)(value) #ptd = propsetToDict = lambda ps: dict([(p.getName(), ps.get(p)) for p in ps.getProperties()]) def propsetToDict(property_set, recurse=False, coersion=coerceValue, visited=None): if visited is None: visited = set() elif property_set in visited: return None result_dict = {} for prop in property_set.getProperties(): value = property_set.get(prop) if recurse and not type(value) in BASE_TYPES: try: deep = propsetToDict(value, recurse, coersion, visited) except: try: deep = [] for element in value: try: deep.append(propsetToDict(element, recurse, coersion, visited)) except: deep.append(coersion(element)) except: deep = None if deep: value = deep else: value = coersion(value) else: value = coersion(value) result_dict[prop.getName()] = value return result_dict def hashmapToDict(hashmap): return dict( (key, hashmap.get(key)) for key in hashmap.keySet() ) def serializeToXML(obj, context=None): if context is None: context = getDesignerContext() serializer = context.createSerializer() serializer.addObject(obj) return serializer.serializeXML() def stringify(obj): if 
isinstance(obj, (str, unicode)): return str(obj).replace('\r\n', '\n') elif isinstance(obj, (list, tuple)): return [stringify(item) for item in obj] elif isinstance(obj, dict): return dict((str(key),stringify(value)) for key, value in obj.items()) elif isinstance(obj, Date): return str(obj.toInstant()) # get the ISO8601 format # coerce java and other objects elif not isinstance(obj, (int, float, bool)): return repr(obj) return obj def yamlEncode(obj): return dump(stringify(obj), sort_keys=True, indent=4) def encode(obj): """ Encodes object in a serializing format. Returns tuple of serialization format's file extention and the serialized data. """ return '.yaml', yamlEncode(obj), # return '.json', system.util.jsonEncode(obj, 2), from com.inductiveautomation.ignition.common.xmlserialization import SerializationException def getSerializationCauses(exception): """Many objects may not be able to deserialize if imported from an Ignition instance with additional (but locally missing) modules. This will drag out some of the context in an easier to scan way. """ causes = [] while exception: causes.append(exception) exception = exception.getCause() return causes
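The coerceValue/COERSION_MAP pattern can be exercised without a JVM by registering a plain-Python stand-in; FakePoint below is purely illustrative (the real code runs under Jython inside Ignition):

class FakePoint:
    def getX(self): return 3.0
    def getY(self): return 4.0

# register a coercion for the stand-in type, then coerce instances of it
COERSION_MAP[FakePoint] = lambda v: {'x': v.getX(), 'y': v.getY()}
print(coerceValue(FakePoint()))  # {'x': 3.0, 'y': 4.0}
print(coerceValue(42))           # 42 (already a base type, passed through)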
25.691919
94
0.716139
692
5,087
5.231214
0.403179
0.017956
0.014088
0.018785
0.01105
0
0
0
0
0
0
0.00553
0.182426
5,087
198
95
25.691919
0.864871
0.275015
0
0.216667
0
0
0.049559
0.009086
0
0
0
0
0
1
0.083333
false
0.016667
0.05
0.016667
0.291667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fc8f64a1c48e617dc27ddaba536434b9f8ea44b
4,915
py
Python
Configuration/GlobalRuns/python/reco_TLR_311X.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
Configuration/GlobalRuns/python/reco_TLR_311X.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
Configuration/GlobalRuns/python/reco_TLR_311X.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms

def customiseCommon(process):

    #####################################################################################################
    ####
    ####  Top level replaces for handling strange scenarios of early collisions
    ####

    ## TRACKING:
    process.newSeedFromTriplets.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
    process.newSeedFromPairs.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
    process.secTriplets.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
    process.thTripletsA.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
    process.thTripletsB.OrderedHitsFactoryPSet.GeneratorPSet.maxElement = cms.uint32(100000)
    process.fourthPLSeeds.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)
    process.fifthSeeds.OrderedHitsFactoryPSet.maxElement = cms.uint32(100000)

    ###### FIXES TRIPLETS FOR LARGE BS DISPLACEMENT ######

    ### prevent bias in pixel vertex
    process.pixelVertices.useBeamConstraint = False

    ###
    ### end of top level replacements
    ###
    ###############################################################################################

    return (process)


##############################################################################
def customisePPData(process):
    process = customiseCommon(process)

    ## particle flow HF cleaning
    process.particleFlowRecHitHCAL.LongShortFibre_Cut = 30.
    process.particleFlowRecHitHCAL.ApplyPulseDPG = True

    ## HF cleaning for data only
    process.hcalRecAlgos.SeverityLevels[3].RecHitFlags.remove("HFDigiTime")
    process.hcalRecAlgos.SeverityLevels[4].RecHitFlags.append("HFDigiTime")

    ## beam-halo-id for data only
    process.CSCHaloData.ExpectedBX = cms.int32(3)

    ## hcal hit flagging
    process.hfreco.PETstat.flagsToSkip = 2
    process.hfreco.S8S1stat.flagsToSkip = 18
    process.hfreco.S9S1stat.flagsToSkip = 26

    return process


##############################################################################
def customisePPMC(process):
    process = customiseCommon(process)
    return process

##############################################################################
def customiseCosmicData(process):
    return process

##############################################################################
def customiseCosmicMC(process):
    return process

##############################################################################
def customiseVALSKIM(process):
    process = customisePPData(process)
    process.reconstruction.remove(process.lumiProducer)
    return process

##############################################################################
def customiseExpress(process):
    process = customisePPData(process)

    import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
    process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()

    return process

##############################################################################
def customisePrompt(process):
    process = customisePPData(process)
    return process

##############################################################################
##############################################################################
def customiseCommonHI(process):

    ###############################################################################################
    ####
    ####  Top level replaces for handling strange scenarios of early HI collisions
    ####

    ## Offline Silicon Tracker Zero Suppression
    process.siStripZeroSuppression.Algorithms.CommonModeNoiseSubtractionMode = cms.string("IteratedMedian")
    process.siStripZeroSuppression.Algorithms.CutToAvoidSignal = cms.double(2.0)
    process.siStripZeroSuppression.Algorithms.Iterations = cms.int32(3)
    process.siStripZeroSuppression.storeCM = cms.bool(True)

    ###
    ### end of top level replacements
    ###
    ###############################################################################################

    return process

##############################################################################
def customiseExpressHI(process):
    process = customiseCommonHI(process)

    import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
    process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()

    return process

##############################################################################
def customisePromptHI(process):
    process = customiseCommonHI(process)

    import RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi
    process.offlineBeamSpot = RecoVertex.BeamSpotProducer.BeamSpotOnline_cfi.onlineBeamSpotProducer.clone()

    return process

##############################################################################
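A hedged sketch of how one of these customisations would be applied inside a cmsRun configuration; the process below is a bare placeholder, whereas a real workflow config would already define the tracking and reco modules these functions modify (calling customisePPData on an empty process would fail):

import FWCore.ParameterSet.Config as cms

process = cms.Process('RECO')
# ... event source, standard sequences and reco modules would be loaded here ...
process = customisePPData(process)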
36.407407
107
0.545677
321
4,915
8.333333
0.376947
0.053458
0.059813
0.065421
0.437383
0.419813
0.379439
0.279252
0.248598
0.248598
0
0.017907
0.125127
4,915
134
108
36.679104
0.604186
0.086063
0
0.403509
0
0
0.010971
0
0
0
0
0
0
1
0.192982
false
0
0.070175
0.035088
0.45614
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fcc73246e5b2e2deb6ef1a5498a653dfdea012b
3,094
py
Python
pynm/feature/extract/nmf.py
ohtaman/pynm
b003962201e4270d0dab681ede37f2d8edd560f2
[ "MIT" ]
1
2018-08-16T20:48:52.000Z
2018-08-16T20:48:52.000Z
pynm/feature/extract/nmf.py
ohtaman/pynm
b003962201e4270d0dab681ede37f2d8edd560f2
[ "MIT" ]
5
2015-01-12T20:40:46.000Z
2017-11-17T01:27:41.000Z
pynm/feature/extract/nmf.py
ohtaman/pynm
b003962201e4270d0dab681ede37f2d8edd560f2
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*-

import numpy
import numpy.random
import numpy.linalg

from . import svd


def svd_init(matrix, dim, seed=None):
    u, s, v = svd.svd(matrix, dim)
    ss = numpy.sqrt(numpy.diag(s))
    return numpy.maximum(0.001, u.dot(ss)), numpy.maximum(0.001, ss.dot(v))


def random_init(matrix, dim, seed=None):
    np_random = numpy.random.RandomState(seed)
    w = np_random.uniform(size=(matrix.shape[0], dim))
    h = np_random.uniform(size=(dim, matrix.shape[1]))
    return w, h


def _improve_beta_divergence(orig, current, w, h, epsilon=1e-9, beta=2.0):
    if beta < 1:
        phi = 1.0/(2.0 - beta)
    elif beta <= 2.0:
        phi = 1.0
    else:
        phi = 1.0/(beta - 1.0)

    wt = w.transpose()
    h *= (wt.dot(orig * current**(beta - 2))/(wt.dot(current**(beta - 1)) + epsilon))**phi
    ht = h.transpose()
    current = w.dot(h)
    w *= ((orig * current**(beta - 2)).dot(ht)/((current**(beta - 1)).dot(ht) + epsilon))**phi
    return w.dot(h), w, h


def _improve_euclidean_distance(orig, current, w, h, epsilon=1e-9):
    wt = w.transpose()
    h *= wt.dot(orig)/(wt.dot(current) + epsilon)
    ht = h.transpose()
    current = w.dot(h)
    w *= orig.dot(ht)/(current.dot(ht) + epsilon)
    return w.dot(h), w, h


def _improve_kl_diveregence(orig, current, w, h, epsilon=1e-9):
    ws = w.sum(axis=0)
    wt = (w/(ws + epsilon)).transpose()
    h *= wt.dot(orig/(current + epsilon))
    ht = h.transpose()
    hs = ht.sum(axis=0)
    current = w.dot(h)
    w *= (orig/(current + epsilon)).dot(ht/(hs + epsilon))
    return w.dot(h), w, h


def nmf(matrix, dim=None, distance="euclid", init=svd_init,
        max_iter=10000, threshould=0.001, epsilon=1e-9, seed=None):
    """Non-negative Matrix Factorization function

    :param numpy.array matrix: Matrix to decompose
    :param int dim: dimension of the factorization
    :param str distance: distance to minimize, one of "euclid", "kl" or "beta".
                         euclid: Euclidean distance
                         kl: Kullback-Leibler divergence
                         beta: beta divergence
                         default: "euclid"
    :param int max_iter: max #iterations of the calculation, default: 10000
    :param float threshould: relative improvement below which the
                             factorization is regarded as converged
    :param float epsilon: epsilon to avoid zero division
    :param int seed: random seed

    :return: factorized matrices w and h
    """
    max_rank = min(matrix.shape)
    dim = min(dim, max_rank) if dim is not None else max_rank
    if distance == "euclid":
        _improve = _improve_euclidean_distance
    elif distance == "kl":
        _improve = _improve_kl_diveregence
    elif distance == "beta":
        _improve = _improve_beta_divergence
    else:
        raise ValueError('distance should be "euclid", "kl" or "beta", not {}'.format(distance))

    w, h = init(matrix, dim, seed)
    wh = w.dot(h)
    prev_norm = numpy.linalg.norm(matrix - wh)
    for _ in range(max_iter):
        wh, w, h = _improve(matrix, wh, w, h, epsilon)
        norm = numpy.linalg.norm(matrix - wh)
        improvement = (prev_norm - norm)/prev_norm
        if improvement < threshould:
            break
        prev_norm = norm
    return w, h
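A quick end-to-end check of nmf using random_init (svd_init depends on the sibling svd module); the matrix size and dim below are arbitrary:

import numpy

numpy.random.seed(0)
m = numpy.random.uniform(size=(20, 15))  # non-negative by construction
w, h = nmf(m, dim=5, init=random_init, seed=0)
# relative reconstruction error of the rank-5 approximation
err = numpy.linalg.norm(m - w.dot(h)) / numpy.linalg.norm(m)
print('relative error: %.4f' % err)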
29.75
94
0.597931
448
3,094
4.042411
0.241071
0.012148
0.019326
0.019879
0.24296
0.201546
0.153506
0.073992
0.032027
0
0
0.023612
0.260827
3,094
103
95
30.038835
0.768255
0.19554
0
0.188406
0
0
0.007407
0
0
0
0
0
0
1
0.086957
false
0
0.057971
0
0.231884
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fccf8df9831cb035ab2861081b74267181cefc9
6,052
py
Python
examples/demo_livepeer.py
scout-cool/Bubbletea
f0312d6f1c7fde4098d500e811f0503796973d07
[ "Apache-2.0" ]
10
2021-08-29T14:58:09.000Z
2022-02-07T21:03:07.000Z
examples/demo_livepeer.py
scout-cool/Bubbletea
f0312d6f1c7fde4098d500e811f0503796973d07
[ "Apache-2.0" ]
null
null
null
examples/demo_livepeer.py
scout-cool/Bubbletea
f0312d6f1c7fde4098d500e811f0503796973d07
[ "Apache-2.0" ]
null
null
null
import datetime
import time

import pandas
from pandas.core.frame import DataFrame
import streamlit as st

import bubbletea

st.header("LIVEPEER Stake Movement")

urlvars = bubbletea.parse_url_var([{'key': 'startdate', 'type': 'datetime'},
                                   {'key': 'enddate', 'type': 'datetime'}])
try:
    end_date = urlvars['enddate']
except KeyError:
    end_date = datetime.date.today() - datetime.timedelta(days=0)

try:
    start_date = urlvars['startdate']
except KeyError:
    start_date = end_date - datetime.timedelta(days=7)

date_range = st.date_input("Date range", (start_date, end_date))
if not len(date_range) == 2:
    st.warning("*Please select a date range.*")
    st.stop()

start_date = date_range[0]
end_date = date_range[1]
start_timestamp = int(time.mktime(start_date.timetuple()))
end_timestamp = int(time.mktime(end_date.timetuple()))

bubbletea.update_url({'startdate': start_date, 'enddate': end_date})

subgraph_url = "https://api.thegraph.com/subgraphs/name/livepeer/livepeer"

query_date_clause = "{timestamp_gte:%s,timestamp_lt:%s}" % (
    start_timestamp,
    end_timestamp,
)
query = """
{
    bondEvents(where: %s, bypassPagination:true) {
        timestamp, bondedAmount, round {id}, newDelegate {id}, oldDelegate {id}, delegator {id},
    },
    unbondEvents(where: %s, bypassPagination:true) {
        timestamp, amount, withdrawRound, round {id}, delegate {id}, delegator {id},
    },
    rebondEvents(where: %s, bypassPagination:true) {
        timestamp, amount, round {id}, delegate {id}, delegator {id},
    }
}
""" % (
    query_date_clause,
    query_date_clause,
    query_date_clause,
)

with st.spinner("Loading data from the graph"):
    df = bubbletea.beta_load_subgraph(subgraph_url, query, useBigDecimal=True)
    df_bond = df["bondEvents"]
    df_bond.rename(columns={"bondedAmount": "amount"}, inplace=True)
    df_rebond = df["rebondEvents"]
    df_unbond = df["unbondEvents"]

i = 0
df_amount = DataFrame()
for df in [df_bond, df_rebond, df_unbond]:
    if len(df) > 0:
        if i == 0:  # the first non-empty frame seeds df_amount ('i == None' in the original never matched)
            df_amount = df[["timestamp", "amount", "round.id"]]
        else:
            df_amount = df_amount.append(df[["timestamp", "amount", "round.id"]])
        i += 1

if len(df_amount) == 0:
    st.write('No data available')
else:
    df_amount = df_amount.reset_index()
    df_amount_over_time = bubbletea.beta_aggregate_timeseries(
        df_amount,
        time_column="timestamp",
        interval=bubbletea.TimeseriesInterval.DAILY,
        columns=[
            bubbletea.ColumnConfig(
                name="amount",
                type=bubbletea.ColumnType.bigdecimal,
                aggregate_method=bubbletea.AggregateMethod.SUM,
                na_fill_value=0.0,
            )
        ],
    )
    df_amount_over_time.index.names = ["time"]

    st.subheader("Stake moved over time")
    st.write(df_amount_over_time)
    bubbletea.beta_plot_line(
        df_amount_over_time,
        x={
            "field": "time",
        },
        y={
            "title": "Amount",
            "data": [{"title": "Amount", "field": "amount"}],
        },
        legend="none",
    )

    df_amount_over_round = bubbletea.beta_aggregate_groupby(
        df_amount,
        by_column="round.id",
        columns=[
            bubbletea.ColumnConfig(
                name="amount",
                type=bubbletea.ColumnType.bigdecimal,
                aggregate_method=bubbletea.AggregateMethod.SUM,
                na_fill_value=0.0,
            )
        ],
    )
    df_amount_over_round.index.names = ["round"]
    st.write(df_amount_over_round)
    bubbletea.beta_plot_line(
        df_amount_over_round,
        title='Stake moved over rounds',
        # axis types: ['quantitative', 'ordinal', 'temporal', 'nominal']
        x={"field": "round", "title": "Round", "type": "ordinal"},
        y={
            "title": "Amount",
            "data": [{"title": "Amount", "field": "amount"}],
        },
        legend="none"
    )

    st.subheader("Transcoder Stake Changes")

    def process_transcoders():
        dfs = []
        if len(df_bond) > 0:
            df0 = df_bond[["timestamp", "amount", "round.id", "oldDelegate.id"]]
            df0.rename(columns={"oldDelegate.id": "transcoder", "amount": "loss"}, inplace=True)
            df1 = df_bond[["timestamp", "amount", "round.id", "newDelegate.id"]]
            df1.rename(columns={"newDelegate.id": "transcoder", "amount": "gain"}, inplace=True)
            dfs.append(df0)
            dfs.append(df1)
        if len(df_unbond) > 0:
            df2 = df_unbond[["timestamp", "amount", "round.id", "delegate.id"]]
            df2.rename(columns={"delegate.id": "transcoder", "amount": "loss"}, inplace=True)
            dfs.append(df2)
        if len(df_rebond) > 0:
            df3 = df_rebond[["timestamp", "amount", "round.id", "delegate.id"]]
            df3.rename(columns={"delegate.id": "transcoder", "amount": "gain"}, inplace=True)
            dfs.append(df3)

        df = pandas.DataFrame()
        for d in dfs:
            if len(df) == 0:
                df = d
            else:
                df = df.append(d)
        df.fillna(0.0, inplace=True)
        df.reset_index(inplace=True)
        return df

    df_transcoders = process_transcoders()
    df_loss_gains = bubbletea.beta_aggregate_groupby(
        df_transcoders,
        "transcoder",
        columns=[
            bubbletea.ColumnConfig(
                name="loss",
                type=bubbletea.ColumnType.bigdecimal,
                aggregate_method=bubbletea.AggregateMethod.SUM,
                na_fill_value=0.0,
            ),
            bubbletea.ColumnConfig(
                name="gain",
                type=bubbletea.ColumnType.bigdecimal,
                aggregate_method=bubbletea.AggregateMethod.SUM,
                na_fill_value=0.0,
            ),
        ],
    )
    df_loss_gains["total"] = df_loss_gains["loss"] + df_loss_gains["gain"]
    st.write(df_loss_gains)
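The loss/gain aggregation at the end can be reproduced on a toy frame with plain pandas, which makes the grouping logic easy to eyeball; the rows below are made up:

import pandas

toy = pandas.DataFrame({
    'transcoder': ['0xabc', '0xabc', '0xdef'],
    'loss': [10.0, 0.0, 5.0],
    'gain': [0.0, 25.0, 0.0],
})
# sum losses and gains per transcoder, then combine into a total
summed = toy.groupby('transcoder')[['loss', 'gain']].sum()
summed['total'] = summed['loss'] + summed['gain']
print(summed)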
28.682464
117
0.594019
676
6,052
5.131657
0.236686
0.039204
0.027674
0.044393
0.400404
0.346786
0.201787
0.182762
0.158547
0.158547
0
0.00811
0.266523
6,052
210
118
28.819048
0.773372
0.008262
0
0.331522
0
0
0.229
0.016667
0
0
0
0
0
1
0.005435
false
0.016304
0.043478
0
0.054348
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd17f1089fdee8a486a2a65c3fb934cc9195151
1,072
py
Python
sml_iris_knn_dtc.py
drishtim17/supervisedML
3981d283a9937bfce793237c171fa95764846558
[ "Apache-2.0" ]
null
null
null
sml_iris_knn_dtc.py
drishtim17/supervisedML
3981d283a9937bfce793237c171fa95764846558
[ "Apache-2.0" ]
null
null
null
sml_iris_knn_dtc.py
drishtim17/supervisedML
3981d283a9937bfce793237c171fa95764846558
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3
import sklearn
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.metrics import accuracy_score

# loading iris
iris = load_iris()

# training features are stored in iris.data
# the corresponding outputs are stored in iris.target
# now splitting into test and train data sets
train_iris, test_iris, train_target, test_target = train_test_split(iris.data, iris.target, test_size=0.2)

# calling knn algo
knnclf = KNeighborsClassifier(n_neighbors=3)
# calling decision tree algo
dsclf = tree.DecisionTreeClassifier()

# data training
knntrained = knnclf.fit(train_iris, train_target)
dsctrained = dsclf.fit(train_iris, train_target)

# testing algo
# predicted output
knnoutput = knntrained.predict(test_iris)
print(knnoutput)
dscoutput = dsctrained.predict(test_iris)  # was knntrained.predict, which re-ran KNN instead of the tree
print(dscoutput)

# original output
print(test_target)

# calculating accuracy
knnpct = accuracy_score(test_target, knnoutput)
print(knnpct)
dscpct = accuracy_score(test_target, dscoutput)
print(dscpct)
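# --- Illustrative sketch (added; not part of the original file) ---
# Passing random_state makes the split, and therefore both accuracy numbers,
# reproducible across runs; a loop keeps the two classifiers symmetric.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)
for clf in (KNeighborsClassifier(n_neighbors=3), DecisionTreeClassifier(random_state=42)):
    clf.fit(X_train, y_train)
    print(type(clf).__name__, accuracy_score(y_test, clf.predict(X_test)))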
24.363636
99
0.841418
152
1,072
5.769737
0.407895
0.062714
0.051311
0.031927
0.120867
0
0
0
0
0
0
0.004065
0.08209
1,072
43
100
24.930233
0.887195
0.251866
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.285714
0
0.285714
0.238095
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd3b3ac45b4ed570227a76c3f4f622771cac325
2,762
py
Python
Python/Exercises/Humanize/humanize.py
Gjacquenot/training-material
16b29962bf5683f97a1072d961dd9f31e7468b8d
[ "CC-BY-4.0" ]
115
2015-03-23T13:34:42.000Z
2022-03-21T00:27:21.000Z
Python/Exercises/Humanize/humanize.py
Gjacquenot/training-material
16b29962bf5683f97a1072d961dd9f31e7468b8d
[ "CC-BY-4.0" ]
56
2015-02-25T15:04:26.000Z
2022-01-03T07:42:48.000Z
Python/Exercises/Humanize/humanize.py
Gjacquenot/training-material
16b29962bf5683f97a1072d961dd9f31e7468b8d
[ "CC-BY-4.0" ]
59
2015-11-26T11:44:51.000Z
2022-03-21T00:27:22.000Z
#!/usr/bin/env python

def humanize(n, base=10, digits=1, unit=''):
    '''convert a floating point number to a human-readable format

    Parameters
    ----------
    n : float or str
        number to convert, it can be a string representation of a
        floating point number
    base : int
        base to use, either 2 or 10, default is 10
    digits : int
        decimal digits to use in format string, default is 1
    unit : str
        unit to use in format string, default is ''

    Returns
    -------
    str
        formatted string

    Raises
    ------
    ValueError
        raised when base is neither 2 nor 10

    Examples
    --------
    >>> humanize(1234)
    '1.2 K'
    >>> humanize(1234, digits=2)
    '1.23 K'
    >>> humanize(1234, base=2, digits=2)
    '1.21 K'
    >>> humanize(1234, unit='B')
    '1.2 KB'
    >>> humanize('1234.56', digits=4, unit='B')
    '1.2346 KB'
    >>> humanize(0.0123)
    '12.3 m'
    '''
    import math
    if base != 2 and base != 10:
        raise ValueError('base should be 2 or 10, not {:d}'.format(base))
    thousands = 3 if base == 10 else 10
    orders = {
        -3: 'n',
        -2: 'u',
        -1: 'm',
        0: '',
        1: 'K',
        2: 'M',
        3: 'G',
        4: 'T',
        5: 'P',
    }
    fmt_str = '{{0:.{}f}} {{1:s}}{{2:s}}'.format(digits)
    exp = math.log(math.fabs(float(n)), base**thousands)
    exp = int(exp - (1 if exp < 0 else 0))
    number = float(n)/base**(exp*thousands)
    return fmt_str.format(number, orders[exp], unit)


def check_line(line):
    try:
        _ = float(line)
        return True
    except ValueError:  # was a bare except; float() signals bad input with ValueError
        return False


if __name__ == '__main__':
    from argparse import ArgumentParser
    import sys
    arg_parser = ArgumentParser(description='convert numbers to '
                                'human-readable format')
    arg_parser.add_argument('n', type=float, nargs='?',
                            help='number to convert')
    arg_parser.add_argument('-d', type=int, default=1,
                            help='number of significant digits')
    arg_parser.add_argument('-b', action='store_true',
                            help='use base 2')
    arg_parser.add_argument('-u', default='',
                            help='unit to display')
    options = arg_parser.parse_args()
    base = 2 if options.b else 10
    if options.n:
        print('{0:s}'.format(humanize(options.n, base=base,
                                      digits=options.d,
                                      unit=options.u)))
    else:
        for line in sys.stdin:
            if check_line(line):
                print('{0:s}'.format(humanize(line.strip(), base=base,
                                              digits=options.d,
                                              unit=options.u)))
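# --- Illustrative sketch (added; not part of the original file) ---
# The Examples section above is doctest-formatted, so the function can be
# checked directly. Assuming the file is saved as humanize.py (a hypothetical
# module name for this sketch):
#
#     python -m doctest humanize.py -v
#
# or programmatically:
import doctest
import humanize  # hypothetical: the module containing the function above

doctest.testmod(humanize, verbose=False)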
28.474227
77
0.513034
354
2,762
3.932203
0.330508
0.038793
0.034483
0.057471
0.119253
0.08908
0.08908
0.048851
0
0
0
0.051991
0.345402
2,762
96
78
28.770833
0.71792
0.272991
0
0.04
0
0
0.113137
0
0
0
0
0
0
1
0.04
false
0
0.06
0
0.16
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd529b1fbfbcec29e94685aeef6fbda0d26c559
1,337
py
Python
data/Latent.py
YoungjuNa-KR/Gaze_estimator_implementation
95482db40ddef413870f51dadc907910d624ee6e
[ "MIT" ]
null
null
null
data/Latent.py
YoungjuNa-KR/Gaze_estimator_implementation
95482db40ddef413870f51dadc907910d624ee6e
[ "MIT" ]
null
null
null
data/Latent.py
YoungjuNa-KR/Gaze_estimator_implementation
95482db40ddef413870f51dadc907910d624ee6e
[ "MIT" ]
1
2022-02-03T11:11:21.000Z
2022-02-03T11:11:21.000Z
import os
import PIL
import torch
from glob import glob
from torch.utils.data import DataLoader
from torchvision.transforms.functional import pil_to_tensor


class Latent(torch.utils.data.Dataset):
    def __init__(self, dir_name, transforms=None):
        # Based on the "./dataset" directory, access a different
        # subdirectory depending on parse.data_train / test.
        self.root_dir = os.path.join("./dataset", dir_name)
        self.imgs = os.listdir(self.root_dir)
        self.transform = None

        # Stores the paths of the individual tensors in the dataset.
        self.data = []
        # Stores the index (label) for each stored tensor path.
        self.label = []

        # Visit each tensor individually and record the corresponding label.
        for i, img in enumerate(self.imgs):
            img_path = os.path.join(self.root_dir, img)
            for img in glob(os.path.join(img_path)):
                self.data.append(img)
                self.label.append(i)

    # Returns the image and label information stored on the instance.
    def __getitem__(self, idx):
        img_path, label = self.data[idx], self.label[idx]
        # os.path.basename yields the bare image file name.
        img_name = os.path.basename(img_path)
        img = torch.load(img_path)
        img = img.type('torch.FloatTensor')
        sample = {"image": img, "label": label, "name": img_name}
        return sample

    def __len__(self):
        return len(self.data)
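# --- Illustrative sketch (added; not part of the original file) ---
# Typical consumption of the dataset above. "train" is a hypothetical
# subdirectory of ./dataset holding saved tensors; default batching assumes
# the stored tensors share a shape so the collate step can stack them.
from torch.utils.data import DataLoader

dataset = Latent("train")
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for batch in loader:
    images, labels = batch["image"], batch["label"]
    break  # one batch is enough for a smoke test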
29.711111
68
0.604338
189
1,337
4.132275
0.460317
0.038412
0.042254
0
0
0
0
0
0
0
0
0
0.293194
1,337
44
69
30.386364
0.826455
0.169783
0
0
0
0
0.036298
0
0
0
0
0
0
1
0.111111
false
0
0.222222
0.037037
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd676c1868fb5496119162edb66de118a176730
876
py
Python
scripts/mklanguages.py
yasen-m/dosage
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
[ "MIT" ]
null
null
null
scripts/mklanguages.py
yasen-m/dosage
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
[ "MIT" ]
null
null
null
scripts/mklanguages.py
yasen-m/dosage
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
[ "MIT" ]
null
null
null
#!/usr/bin/python
# update languages.py from pycountry
import os
import codecs
import pycountry

basepath = os.path.dirname(os.path.dirname(__file__))


def main():
    """Update language information in dosagelib/languages.py."""
    fn = os.path.join(basepath, 'dosagelib', 'languages.py')
    encoding = 'utf-8'
    with codecs.open(fn, 'w', encoding) as f:
        f.write('# -*- coding: %s -*-%s' % (encoding, os.linesep))
        f.write('# ISO 639-1 language codes from pycountry%s' % os.linesep)
        write_languages(f)


def write_languages(f):
    """Write language information."""
    f.write("Iso2Language = {%s" % os.linesep)
    for language in pycountry.languages:
        if hasattr(language, 'alpha2'):
            f.write(" %r: %r,%s" % (language.alpha2, language.name, os.linesep))
    f.write("}%s" % os.linesep)


if __name__ == '__main__':
    main()
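# --- Illustrative sketch (added; not part of the original file) ---
# Newer pycountry releases renamed the attribute `alpha2` to `alpha_2`, so
# the hasattr() guard above would silently match nothing on them. A
# version-tolerant lookup (iso2_items is a hypothetical helper name):
import pycountry

def iso2_items():
    for language in pycountry.languages:
        code = getattr(language, 'alpha_2', getattr(language, 'alpha2', None))
        if code:
            yield code, language.name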
29.2
83
0.634703
115
876
4.713043
0.4
0.066421
0.055351
0.055351
0
0
0
0
0
0
0
0.011396
0.19863
876
29
84
30.206897
0.760684
0.152968
0
0
0
0
0.191781
0
0
0
0
0
0
1
0.105263
false
0
0.157895
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd6b807f6071d9b5d2c510c8209a51bbbc35084
531
py
Python
reference/for_and_while.py
SeanSyue/TensorflowReferences
2c93f4c770e2713ef4769f287e022d03e7097188
[ "MIT" ]
null
null
null
reference/for_and_while.py
SeanSyue/TensorflowReferences
2c93f4c770e2713ef4769f287e022d03e7097188
[ "MIT" ]
null
null
null
reference/for_and_while.py
SeanSyue/TensorflowReferences
2c93f4c770e2713ef4769f287e022d03e7097188
[ "MIT" ]
null
null
null
import tensorflow as tf

x = tf.Variable(0, name='x')
model = tf.global_variables_initializer()

with tf.Session() as session:
    for i in range(5):
        session.run(model)
        x = x + 1
        print(session.run(x))

x = tf.Variable(0., name='x')
threshold = tf.constant(5.)
model = tf.global_variables_initializer()

with tf.Session() as session:
    session.run(model)
    while session.run(tf.less(x, threshold)):
        x = x + 1
        x_value = session.run(x)
        print(x_value)
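# --- Illustrative sketch (added; not part of the original file) ---
# The reference above uses the TF1 graph/session API; tf.Session and
# tf.global_variables_initializer were removed in TensorFlow 2.x. Under
# eager execution the same two loops collapse to ordinary Python:
import tensorflow as tf

x = tf.Variable(0.)
for _ in range(5):
    x.assign_add(1.)
    print(x.numpy())

x.assign(0.)
threshold = tf.constant(5.)
while x < threshold:
    x.assign_add(1.)
    print(x.numpy())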
19.666667
46
0.589454
76
531
4.039474
0.355263
0.162866
0.071661
0.078176
0.469055
0.469055
0.358306
0.358306
0.358306
0.358306
0
0.015666
0.278719
531
26
47
20.423077
0.785901
0
0
0.470588
0
0
0.003968
0
0
0
0
0
0
1
0
false
0
0.058824
0
0.058824
0.117647
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd7ed8a83b56f175881d6f318fa389d67ee450a
732
py
Python
bewerte/muendlich.py
jupfi81/NotenManager
ee96a41088bb898c025aed7b3c904741cb71d004
[ "MIT" ]
null
null
null
bewerte/muendlich.py
jupfi81/NotenManager
ee96a41088bb898c025aed7b3c904741cb71d004
[ "MIT" ]
null
null
null
bewerte/muendlich.py
jupfi81/NotenManager
ee96a41088bb898c025aed7b3c904741cb71d004
[ "MIT" ]
null
null
null
"""Berechnet die mündliche Note""" import csv with open('bewertung.csv', encoding='utf-8', mode='r') as bewertung: TABELLE = [] DATA = csv.reader(bewertung, delimiter=',') for row in DATA: TABELLE.append([element.strip() for element in row]) OUTPUT = [TABELLE[0] + ["Note"]] del TABELLE[0] for row in TABELLE: if len(row) > 3: note = 20*float(row[2]) + 20*float(row[3]) + 40*float(row[4]) + 20*float(row[5]) note = round(note/25, 0)/4 row = row + [note] OUTPUT.append(row) with open('note.csv', encoding='utf-8', mode='w') as safe: WRITER = csv.writer(safe, delimiter=',') for row in OUTPUT: WRITER.writerow(row)
31.826087
92
0.562842
102
732
4.039216
0.421569
0.07767
0.058252
0.072816
0.092233
0
0
0
0
0
0
0.039179
0.26776
732
22
93
33.272727
0.729478
0.038251
0
0
0
0
0.055874
0
0
0
0
0
0
1
0
false
0
0.055556
0
0.055556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd7f7aa485ce2ad0b848a0e2bbaa8cf36a6c24a
410
py
Python
python3/tests/test_edit_distance.py
qianbinbin/leetcode
915cecab0c940cd13847683ec55b17b77eb0f39b
[ "MIT" ]
4
2018-03-05T02:27:16.000Z
2021-03-15T14:19:44.000Z
python3/tests/test_edit_distance.py
qianbinbin/leetcode
915cecab0c940cd13847683ec55b17b77eb0f39b
[ "MIT" ]
null
null
null
python3/tests/test_edit_distance.py
qianbinbin/leetcode
915cecab0c940cd13847683ec55b17b77eb0f39b
[ "MIT" ]
2
2018-07-22T10:32:10.000Z
2018-10-20T03:14:28.000Z
from unittest import TestCase

from leetcodepy.edit_distance import *

solution1 = Solution1()

word11 = "horse"
word12 = "ros"
expected1 = 3

word21 = "intention"
word22 = "execution"
expected2 = 5


class TestEditDistance(TestCase):
    def test1(self):
        self.assertEqual(expected1, solution1.minDistance(word11, word12))
        self.assertEqual(expected2, solution1.minDistance(word21, word22))
17.083333
74
0.731707
43
410
6.953488
0.627907
0.100334
0
0
0
0
0
0
0
0
0
0.079179
0.168293
410
23
75
17.826087
0.797654
0
0
0
0
0
0.063415
0
0
0
0
0
0.153846
1
0.076923
false
0
0.153846
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fd8f8fea0aa37bc2adfbcbf6dda99e537d99a7f
805
py
Python
pageobject/commands/index.py
lukas-linhart/pageobject
6ae83680ae62a94f93cefc394e4f3cc6999aeead
[ "MIT" ]
1
2017-01-12T06:15:36.000Z
2017-01-12T06:15:36.000Z
pageobject/commands/index.py
lukas-linhart/pageobject
6ae83680ae62a94f93cefc394e4f3cc6999aeead
[ "MIT" ]
null
null
null
pageobject/commands/index.py
lukas-linhart/pageobject
6ae83680ae62a94f93cefc394e4f3cc6999aeead
[ "MIT" ]
null
null
null
def index(self, value):
    """
    Return index of the first child containing the specified value.

    :param str value: text value to look for
    :returns: index of the first child containing the specified value
    :rtype: int
    :raises ValueError: if the value is not found
    """
    self.logger.info('getting index of text "{}" within page object list {}'.format(value, self._log_id_short))
    self.logger.debug('getting index of text "{}" within page object list; {}'.format(value, self._log_id_long))
    index = self.text_values.index(value)
    self.logger.info('index of text "{}" within page object list {} is {}'.format(value, self._log_id_short, index))
    self.logger.debug('index of text "{}" within page object is {}; {}'.format(value, index, self._log_id_long))
    return index
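# --- Illustrative sketch (added; not part of the original file) ---
# The method delegates to list.index on self.text_values, so the documented
# ValueError comes straight from the underlying list when the text is absent:
values = ["alpha", "beta", "gamma"]
print(values.index("beta"))   # -> 1
try:
    values.index("delta")
except ValueError as exc:
    print(exc)                # -> 'delta' is not in list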
47.352941
116
0.690683
119
805
4.563025
0.336134
0.077348
0.081031
0.12523
0.548803
0.548803
0.443831
0.38674
0.38674
0.213628
0
0
0.185093
805
16
117
50.3125
0.827744
0.284472
0
0
0
0
0.377532
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fda8ca8896b2d1bcde84055f16e53f955e23e9c
2,724
py
Python
vlsopt/data_factory/transaction_factory.py
violas-core/bvexchange
74cf3197aad02e0f5e2dac457266d11c9c8cc746
[ "MIT" ]
null
null
null
vlsopt/data_factory/transaction_factory.py
violas-core/bvexchange
74cf3197aad02e0f5e2dac457266d11c9c8cc746
[ "MIT" ]
null
null
null
vlsopt/data_factory/transaction_factory.py
violas-core/bvexchange
74cf3197aad02e0f5e2dac457266d11c9c8cc746
[ "MIT" ]
1
2022-01-05T04:39:47.000Z
2022-01-05T04:39:47.000Z
#!/usr/bin/python3
import operator
import sys
import json
import os

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "./"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../lbdiemsdk/src"))

from diem import (
    jsonrpc,
)
from factory_base import (
    factory_base,
    field
)


def parse_events(events):
    datas = []
    if events:
        for event in events:
            datas.append({
                "key": event.key,
                "sequence_number": event.sequence_number,
                "data": {
                    "type": event.data.type,
                    "amount": {
                        "amount": event.data.amount.amount,
                        "currency": event.data.amount.currency,
                    },
                    "sender": event.data.sender,
                    "receiver": event.data.receiver,
                }
            })
    return datas


def parse_state(state):
    return state == "executed"


class transaction_factory(factory_base):
    global parse_state

    tran_fields = [
        field("tran_type", "transaction.type"),
        field("script_type", "transaction.script.type"),
        field("token_id", "transaction.script.currency"),
        field("data", "transaction.script.metadata"),
        field("receiver", "transaction.script.receiver"),
        field("gas_token", "transaction.gas_currency"),
        field("gas_unit_price", "transaction.gas_unit_price"),
        field("max_gas_amount", "transaction.max_gas_amount"),
        field("amount", "transaction.script.amount"),
        field("sequence_number", "transaction.sequence_number"),
        field("vm_status", "vm_status.type"),
        field("state", "vm_status.type", parse_state),
        field("gas_used", "gas_used"),
        field("version", "version"),
        field("events", "events", parse_events),
    ]

    def __init__(self, data):
        factory_base.__init__(self, data)
        self.__init_show_fields()

    def __init_show_fields(self):
        self.set_fields(self.tran_fields)
        default_outputs = {"state": "not support",
                           "events_len": len(self.events)}
        self.extend_default_outputs(default_outputs)

    def get_version(self):
        return self.get_attr_with_path(self.get_field("version").path)
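# --- Illustrative sketch (added; not part of the original file) ---
# factory_base is project-internal; each field(name, path[, parser]) entry
# above pairs an output name with a dotted attribute path into the jsonrpc
# transaction object. A generic resolver for such dotted paths might look
# like this (resolve_path is a hypothetical helper, not the project's API):
from functools import reduce

def resolve_path(obj, dotted_path):
    """Walk 'a.b.c' through attributes (or dict keys) of obj."""
    def step(current, part):
        if isinstance(current, dict):
            return current[part]
        return getattr(current, part)
    return reduce(step, dotted_path.split("."), obj)

# usage: resolve_path(txn, "transaction.script.currency")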
33.62963
96
0.551762
281
2,724
5.074733
0.256228
0.050491
0.036466
0.042076
0.148668
0.148668
0.148668
0.148668
0.148668
0.148668
0
0.000534
0.312041
2,724
80
97
34.05
0.760406
0.006241
0
0
0
0
0.207394
0.085767
0
0
0
0
0
1
0.078125
false
0
0.09375
0.03125
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fdadaa704a4a57bab069bbf9519d57e9bc28d25
3,703
py
Python
tests/test_source.py
j18ter/exchangelib
afb0df65c5533999bca92e25be4c00de5c03043c
[ "BSD-2-Clause" ]
null
null
null
tests/test_source.py
j18ter/exchangelib
afb0df65c5533999bca92e25be4c00de5c03043c
[ "BSD-2-Clause" ]
null
null
null
tests/test_source.py
j18ter/exchangelib
afb0df65c5533999bca92e25be4c00de5c03043c
[ "BSD-2-Clause" ]
null
null
null
from exchangelib.errors import (
    ErrorAccessDenied,
    ErrorFolderNotFound,
    ErrorInvalidOperation,
    ErrorItemNotFound,
    ErrorNoPublicFolderReplicaAvailable,
)
from exchangelib.properties import EWSElement

from .common import EWSTest


class CommonTest(EWSTest):
    def test_magic(self):
        self.assertIn(self.account.protocol.version.api_version, str(self.account.protocol))
        self.assertIn(self.account.protocol.credentials.username, str(self.account.protocol.credentials))
        self.assertIn(self.account.primary_smtp_address, str(self.account))
        self.assertIn(str(self.account.version.build.major_version), repr(self.account.version))
        for item in (
            self.account.protocol,
            self.account.version,
        ):
            with self.subTest(item=item):
                # Just test that these at least don't throw errors
                repr(item)
                str(item)
        for attr in (
            "admin_audit_logs",
            "archive_deleted_items",
            "archive_inbox",
            "archive_msg_folder_root",
            "archive_recoverable_items_deletions",
            "archive_recoverable_items_purges",
            "archive_recoverable_items_root",
            "archive_recoverable_items_versions",
            "archive_root",
            "calendar",
            "conflicts",
            "contacts",
            "conversation_history",
            "directory",
            "drafts",
            "favorites",
            "im_contact_list",
            "inbox",
            "journal",
            "junk",
            "local_failures",
            "msg_folder_root",
            "my_contacts",
            "notes",
            "outbox",
            "people_connect",
            "public_folders_root",
            "quick_contacts",
            "recipient_cache",
            "recoverable_items_deletions",
            "recoverable_items_purges",
            "recoverable_items_root",
            "recoverable_items_versions",
            "search_folders",
            "sent",
            "server_failures",
            "sync_issues",
            "tasks",
            "todo_search",
            "trash",
            "voice_mail",
        ):
            with self.subTest(attr=attr):
                # Test distinguished folder shortcuts. Some may raise ErrorAccessDenied
                try:
                    item = getattr(self.account, attr)
                except (
                    ErrorAccessDenied,
                    ErrorFolderNotFound,
                    ErrorItemNotFound,
                    ErrorInvalidOperation,
                    ErrorNoPublicFolderReplicaAvailable,
                ):
                    continue
                else:
                    repr(item)
                    str(item)
                    self.assertTrue(item.is_distinguished)

    def test_from_xml(self):
        # Test for all EWSElement classes that they handle None as input to from_xml()
        import exchangelib

        for mod in (
            exchangelib.attachments,
            exchangelib.extended_properties,
            exchangelib.indexed_properties,
            exchangelib.folders,
            exchangelib.items,
            exchangelib.properties,
        ):
            for k, v in vars(mod).items():
                with self.subTest(k=k, v=v):
                    if type(v) is not type:
                        continue
                    if not issubclass(v, EWSElement):
                        continue
                    # from_xml() does not support None input
                    with self.assertRaises(Exception):
                        v.from_xml(elem=None, account=None)
34.287037
105
0.533081
309
3,703
6.197411
0.430421
0.063185
0.049608
0.036031
0.032376
0
0
0
0
0
0
0
0.387254
3,703
107
106
34.607477
0.843984
0.063192
0
0.214286
0
0
0.174076
0.079099
0
0
0
0
0.061224
1
0.020408
false
0
0.040816
0
0.071429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fdb3bda49808628500a9864a821b84e3138f89c
735
py
Python
{{cookiecutter.project_slug}}/app/utils/mail.py
Bexils/fastapi-project-template
1d6937c5adce7603c77e01f8560032082392fdbd
[ "MIT" ]
4
2021-04-04T23:19:06.000Z
2021-04-10T21:32:23.000Z
{{cookiecutter.project_slug}}/app/utils/mail.py
Bexils/fastapi-project-template
1d6937c5adce7603c77e01f8560032082392fdbd
[ "MIT" ]
null
null
null
{{cookiecutter.project_slug}}/app/utils/mail.py
Bexils/fastapi-project-template
1d6937c5adce7603c77e01f8560032082392fdbd
[ "MIT" ]
null
null
null
import os
from datetime import datetime
from pathlib import Path

from pydantic import EmailStr


def send_dummy_mail(subject: str, message: str, to: EmailStr):
    current_path = os.getcwd()
    filename = f'{datetime.now().timestamp()} - {subject}.txt'
    email_text = f'''Subject: {subject}
From: no-reply@email.com
To: {to}

{message}
'''
    email_path = Path(os.path.join(current_path, 'emails'))
    emails_file = os.path.join(current_path, 'emails', filename)
    try:
        with open(emails_file, 'w') as file_obj:
            file_obj.write(email_text)
    except FileNotFoundError:
        email_path.mkdir()
        with open(emails_file, 'w') as file_obj:
            file_obj.write(email_text)
    return 'email sent!'
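# --- Illustrative sketch (added; not part of the original file) ---
# The try/except retry above exists only because the ./emails directory may
# not exist yet; Path.mkdir(parents=True, exist_ok=True) removes the need
# for it. send_dummy_mail_v2 is a hypothetical variant, not the project's API.
from pathlib import Path

def send_dummy_mail_v2(subject: str, message: str, to: str) -> str:
    out_dir = Path.cwd() / "emails"
    out_dir.mkdir(parents=True, exist_ok=True)
    (out_dir / f"{subject}.txt").write_text(
        f"Subject: {subject}\nFrom: no-reply@email.com\nTo: {to}\n\n{message}\n"
    )
    return "email sent!"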
28.269231
64
0.672109
101
735
4.722772
0.425743
0.0587
0.041929
0.071279
0.318658
0.318658
0.205451
0.205451
0.205451
0.205451
0
0
0.204082
735
26
65
28.269231
0.815385
0
0
0.181818
0
0
0.180707
0.038043
0
0
0
0
0
1
0.045455
false
0
0.181818
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fe22fd049d8e5e23653953f62233abe237a47e8
16,692
py
Python
bloodbank_rl/pyomo_models/stochastic_model_runner.py
joefarrington/bloodbank_rl
f285581145034b498f01c9b44f95437ceddb042a
[ "MIT" ]
null
null
null
bloodbank_rl/pyomo_models/stochastic_model_runner.py
joefarrington/bloodbank_rl
f285581145034b498f01c9b44f95437ceddb042a
[ "MIT" ]
null
null
null
bloodbank_rl/pyomo_models/stochastic_model_runner.py
joefarrington/bloodbank_rl
f285581145034b498f01c9b44f95437ceddb042a
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd import pyomo.environ as pyo import mpisppy.utils.sputils as sputils from mpisppy.opt.ef import ExtensiveForm from pathlib import Path import os import sys path_root = Path(os.path.abspath(__file__)).parents[2] sys.path.append(str(path_root)) from bloodbank_rl.environments.platelet_bankSR import PoissonDemandProviderSR import bloodbank_rl.pyomo_models.model_constructors as pyomo_mc class PyomoModelRunner: def __init__( self, model_constructor, model_constructor_params, n_scenarios, demand_provider, demand_provider_kwargs=None, scenario_name_start=0, # Used this as starting seed for Pyomo experiments with sim data solver_string="gurobi_persistent", solver_options={"LogFile": "gurobi.log", "OutputFlag": 1, "LogToConsole": 0}, log=None, ): self.model_constructor = model_constructor self.model_constructor_params = model_constructor_params self.n_scenarios = n_scenarios self.demand_provider = demand_provider self.demand_provider_kwargs = demand_provider_kwargs self.scenario_name_start = scenario_name_start self.solver_string = solver_string self.solver_options = solver_options self.all_scenario_names = [ f"{i+self.scenario_name_start}" for i in range(0, self.n_scenarios) ] self.checks_to_perform = self._determine_checks_to_perform() self.log = log def scenario_creator(self, scenario_name): if self.demand_provider_kwargs: prov = self.demand_provider( **self.demand_provider_kwargs, seed=int(scenario_name) ) else: prov = self.demand_provider(seed=int(scenario_name)) prov.reset() demand = { t: prov.generate_demand() for t in range(1, self.model_constructor_params["t_max"] + 1) } model = self.model_constructor( demand=demand, **self.model_constructor_params ).build_model() # Telling it which decisions belong to first stage - for us this could be all our policy parameters # because we can't change them during a trajectory first_stage_params = self._get_first_stage_decision_params(model) sputils.attach_root_node(model, 0, first_stage_params) # If we don't specify, assume that all equally likely model._mpisppy_probability = 1.0 / self.n_scenarios return model def _get_first_stage_decision_params(self, model): if self.model_constructor.policy_parameters() == ["s", "S"]: return [model.s, model.S] elif self.model_constructor.policy_parameters() == ["s", "Q"]: return [model.s, model.Q] elif self.model_constructor.policy_parameters() == ["s", "S", "alpha", "Q"]: return [model.s, model.S, model.alpha, model.Q] elif self.model_constructor.policy_parameters() == ["s", "S", "beta", "Q"]: return [model.s, model.S, model.beta, model.Q] elif self.model_constructor.policy_parameters() == ["S"]: return [model.S] else: raise ValueError("Policy parameters not recognised") def solve_program(self): options = {"solver": self.solver_string} self.ef = ExtensiveForm( options=options, all_scenario_names=self.all_scenario_names, scenario_creator=self.scenario_creator, ) self.results = self.ef.solve_extensive_form(solver_options=self.solver_options) objval = self.ef.get_objective_value() return objval def construct_results_dfs(self): self.results_list = [] self.costs_df = pd.DataFrame( columns=[ "Seed", "Variable cost", "Holding cost", "Fixed cost", "Wastage cost", "Shortage cost", ] ) for tup in self.ef.scenarios(): scen = tup[0] if self.demand_provider_kwargs: prov = self.demand_provider( **self.demand_provider_kwargs, seed=int(scen) ) else: prov = self.demand_provider(seed=int(scen)) prov.reset() demand = { t: prov.generate_demand() for t in range(1, 
self.model_constructor_params["t_max"] + 1) } model = tup[1] # Add common variables to output res_dicts = [ { "opening_inventory": [ round(model.IssB[t, a](), 0) for a in model.A ], "received": [round(model.X[t, a](), 0) for a in model.A], "demand": round(demand[t], 0), "DSSR": [round(model.DssR[t, a](), 0) for a in model.A], "wastage": round(model.W[t](), 0), "shortage": round(model.E[t](), 0), "closing inventory": [ round(model.IssE[t, a](), 0) for a in model.A ], "inventory position": round(model.IP[t](), 0), "order quantity": round(model.OQ[t](), 0), } for t in model.T ] # Add policy paramters to results for res_dict, t in zip(res_dicts, model.T): for param in self.model_constructor.policy_parameters(): if self.model_constructor_params["weekly_policy"]: param_string = f"model.{param}[(t-1) % 7]()" else: param_string = f"model.{param}[t]()" res_dict[f"{param}"] = round(eval(param_string), 0) self.results_list.append(pd.DataFrame(res_dicts)) # Record the costs for each scenario and store in a single Pandas DataFrame scen_costs_dict = { "Seed": scen, "Variable cost": round(model.variable_cost(), 0), "Holding cost": round(model.holding_cost(), 0), "Fixed cost": round(model.fixed_cost(), 0), "Wastage cost": round(model.wastage_cost(), 0), "Shortage cost": round(model.shortage_cost(), 0), } self.costs_df = self.costs_df.append(scen_costs_dict, ignore_index=True) if self.log is not None: self.log.info(f"##### Scenario {scen} #####") self.log.info(f"Variable cost: {round(model.variable_cost(),0)}") self.log.info(f"Holding cost: {round(model.holding_cost(),0)}") self.log.info(f"Fixed cost: {round(model.fixed_cost(),0)}") self.log.info(f"Wastage cost: {round(model.wastage_cost(),0)}") self.log.info(f"Shortage cost: {round(model.shortage_cost(),0)}") else: print(f"##### Scenario {scen} #####") # For now, also print the costs as useful for debugging print(f"Variable cost: {round(model.variable_cost(),0)}") print(f"Holding cost: {round(model.holding_cost(),0)}") print(f"Fixed cost: {round(model.fixed_cost(),0)}") print(f"Wastage cost: {round(model.wastage_cost(),0)}") print(f"Shortage cost: {round(model.shortage_cost(),0)}") def save_results(self, directory_path_string): for scen, df in zip(self.all_scenario_names, self.results_list): filename = Path(directory_path_string) / f"scenario_{scen}_output.csv" df.to_csv(filename) filename = Path(directory_path_string) / f"all_costs.csv" self.costs_df.to_csv(filename) def check_outputs(self, directory_path_string): self.results_of_checks_list = [] for scen, scenario_df in zip(self.all_scenario_names, self.results_list): # Ensure that entries in columns with array values are numpy arrays array_cols = ["opening_inventory", "received", "DSSR", "closing inventory"] for col in array_cols: scenario_df[f"{col}"] = scenario_df[f"{col}"].apply( lambda x: np.array(x) ) # Do a merge to easily run checks where we look at consecutive rows merged_results = pd.concat( [ scenario_df, scenario_df.loc[:, ["opening_inventory", "received"]] .shift(-1) .add_prefix("next_"), ], axis=1, ) # Run the necessary checks out_df = pd.DataFrame() for f in self.checks_to_perform: res = merged_results.apply(f, axis=1) out_df = pd.concat([out_df, res], axis=1) # Print the number of rows with failure and store # the results if any failures for a scenario fail_check_rows = out_df[~out_df.all(axis=1)] n_rows_with_fail = fail_check_rows.shape[0] if self.log is not None: self.log.info( f"Scenario {scen}: {n_rows_with_fail} rows with a failed check" ) else: print(f"Scenario {scen}: 
{n_rows_with_fail} rows with a failed check") if n_rows_with_fail > 0: filename = Path(directory_path_string) / f"scenario_{scen}_checks.csv" out_df.to_csv(filename) self.results_of_checks_list.append(out_df) ### Functions for checking the output is consistent with constraints ### # TODO: Could run a check that policy params same in each scenario def _determine_checks_to_perform(self): checks_to_run = [ self._check_wastage, self._check_shortage, self._check_inventory_during_day, self._check_no_max_age_opening_inventory, self._check_close_to_next_open_inventory, self._check_order_to_next_received, ] if self.model_constructor.policy_parameters() == ["s", "S"]: return checks_to_run + [self._check_sS] elif self.model_constructor.policy_parameters() == ["s", "Q"]: return checks_to_run + [self._check_sQ] elif self.model_constructor.policy_parameters() == ["s", "S", "alpha", "Q"]: return checks_to_run + [self._check_sSaQ] elif self.model_constructor.policy_parameters() == ["s", "S", "beta", "Q"]: return checks_to_run + [self._check_sSbQ] elif self.model_constructor.policy_parameters() == ["S"]: return checks_to_run + [self._check_S] else: raise ValueError("Policy parameters not recognised") # High level wastage check def _check_wastage(self, row): return pd.Series( { "check_wastage": row["wastage"] == max( 0, row["opening_inventory"][0] + row["received"][0] - row["demand"] ) } ) # High level shortage check def _check_shortage(self, row): return pd.Series( { "check_shortage": row["shortage"] == max( 0, row["demand"] - row["opening_inventory"].sum() - row["received"].sum(), ) } ) # Check closing inventory def _calculate_remaining_stock_and_demand(self, row): total_remaining_demand = row["demand"] inventory = row["opening_inventory"] + row["received"] remaining_demand = np.zeros_like(inventory) for idx, stock in enumerate(inventory): demand_filled = min(total_remaining_demand, stock) remaining_stock = stock - demand_filled total_remaining_demand = total_remaining_demand - demand_filled inventory[idx] = remaining_stock remaining_demand[idx] = total_remaining_demand return inventory, remaining_demand def _check_inventory_during_day(self, row): ( calc_closing_inventory, calc_remaining_demand, ) = self._calculate_remaining_stock_and_demand(row) return pd.Series( { "check_closing_inventory": ( row["closing inventory"] == calc_closing_inventory ).all(), "check_DSSR": (row["DSSR"] == calc_remaining_demand).all(), "check_inventory_position": row["inventory position"] == row["closing inventory"][1:].sum(), } ) def _check_no_max_age_opening_inventory(self, row): return pd.Series( {"check_no_max_age_opening_inventory": row["opening_inventory"][-1] == 0} ) def _check_close_to_next_open_inventory(self, row): if row["next_opening_inventory"] is np.nan: return pd.Series({"check_close_to_next_open_inventory": None}) else: return pd.Series( { "check_close_to_next_open_inventory": ( row["closing inventory"][1:] == row["next_opening_inventory"][:-1] ).all() } ) def _check_order_to_next_received(self, row): if row["next_received"] is np.nan: return pd.Series({"check_order_to_next_received": None}) else: return pd.Series( { "check_order_to_next_received": row["order quantity"] == row["next_received"].sum() } ) def _check_sS(self, row): S_gt_s = row["S"] >= row["s"] + 1 if row["inventory position"] < row["s"]: order_quantity_to_params = ( row["order quantity"] == row["S"] - row["inventory position"] ) else: order_quantity_to_params = row["order quantity"] == 0 return pd.Series( { "check_sS_S_gt_s": S_gt_s, 
"check_sS_order_quantity_to_params": order_quantity_to_params, } ) def _check_S(self, row): if row["inventory position"] < row["S"]: order_quantity_to_params = ( row["order quantity"] == row["S"] - row["inventory position"] ) else: order_quantity_to_params = row["order quantity"] == 0 return pd.Series( {"check_S_order_quantity_to_params": order_quantity_to_params,} ) def _check_sQ(self, row): if row["inventory position"] < row["s"]: order_quantity_to_params = row["order quantity"] == row["Q"] else: order_quantity_to_params = row["order quantity"] == 0 return pd.Series( {"check_sQ_order_quantity_to_params": order_quantity_to_params} ) def _check_sSaQ(self, row): S_gt_s = row["S"] >= row["s"] + 1 s_gt_a = row["s"] >= row["alpha"] + 1 if row["inventory position"] < row["alpha"]: order_quantity_to_params = ( row["order quantity"] == row["S"] - row["inventory position"] ) elif row["inventory position"] < row["s"]: order_quantity_to_params = row["order quantity"] == row["Q"] else: order_quantity_to_params = row["order quantity"] == 0 return pd.Series( { "check_sSaQ_S_gt_s": S_gt_s, "check_sSaQ_s_gt_a": s_gt_a, "check_sSaQ_order_quantity_to_params": order_quantity_to_params, } ) def _check_sSbQ(self, row): S_gt_s = row["S"] >= row["s"] + 1 s_gt_b = row["s"] >= row["beta"] + 1 if row["inventory position"] < row["beta"]: order_quantity_to_params = row["order quantity"] == row["Q"] elif row["inventory position"] < row["s"]: order_quantity_to_params = ( row["order quantity"] == row["S"] - row["inventory position"] ) else: order_quantity_to_params = row["order quantity"] == 0 return pd.Series( { "check_sSbQ_S_gt_s": S_gt_s, "check_sSbQ_s_gt_b": s_gt_b, "check_sSbQ_order_quantity_to_params": order_quantity_to_params, } )
37.679458
107
0.557453
1,917
16,692
4.5759
0.14554
0.053352
0.03762
0.052668
0.499886
0.437187
0.400023
0.337323
0.251026
0.209758
0
0.006013
0.332495
16,692
442
108
37.764706
0.781278
0.054278
0
0.22191
0
0
0.155374
0.051199
0
0
0
0.002262
0
1
0.05618
false
0
0.02809
0.008427
0.160112
0.019663
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fe41f5dc40be297773f566df8109a75b70ca3b8
3,623
py
Python
ch1/tictactoe.py
T0nyX1ang/Reinforcement-Learning
a86ab92ee628b95c7dbe432c079b7ce04b5e982a
[ "MIT" ]
null
null
null
ch1/tictactoe.py
T0nyX1ang/Reinforcement-Learning
a86ab92ee628b95c7dbe432c079b7ce04b5e982a
[ "MIT" ]
null
null
null
ch1/tictactoe.py
T0nyX1ang/Reinforcement-Learning
a86ab92ee628b95c7dbe432c079b7ce04b5e982a
[ "MIT" ]
null
null
null
import random
import json


class TTTGame(object):
    def __init__(self):
        self._board = [0] * 9
        self._end = False
        with open('learning.json', 'r') as f:
            self._state = json.loads(f.read())
        self._alpha = 0.05

    def judge(self, state):
        if (sum(state[0: 3]) == 3 or \
            sum(state[3: 6]) == 3 or \
            sum(state[6::]) == 3 or \
            sum(state[0::3]) == 3 or \
            sum(state[1::3]) == 3 or \
            sum(state[2::3]) == 3 or \
            sum(state[0::4]) == 3 or \
            sum(state[2:7:2]) == 3):
            self._end = True
            return 1
        elif (sum(state[0: 3]) == -3 or \
              sum(state[3: 6]) == -3 or \
              sum(state[6::]) == -3 or \
              sum(state[0::3]) == -3 or \
              sum(state[1::3]) == -3 or \
              sum(state[2::3]) == -3 or \
              sum(state[0::4]) == -3 or \
              sum(state[2:7:2]) == -3):
            self._end = True
            return 0
        elif 0 not in state:
            self._end = True
            return 0.5  # can be set to 0 if you need sharper winning criterion.
        else:
            self._end = False
            if str(state) not in self._state:
                self._state[str(state)] = 0.5  # move state
            return self._state[str(state)]  # study starts from here ...

    def random_move(self, move_type=-1):
        self.judge(self._board)
        if (self._end):
            return '[End]'
        empty = []
        count = 0
        for val in self._board:
            if (val == 0):
                empty.append(count)
            count += 1
        select = empty[random.randint(0, len(empty) - 1)]
        move_board = self._board.copy()
        move_board[select] = move_type
        value = self.judge(move_board)
        self._state[str(self._board)] = self._state[str(self._board)] + self._alpha * (value - self._state[str(self._board)])  # update move
        self._board = move_board.copy()
        return select

    def greedy_move(self, move_type=1):
        self.judge(self._board)
        if (self._end):
            return '[End]'
        selects = []
        max_value = -1
        count = 0
        for val in self._board:
            if (val == 0):
                move_board = self._board.copy()
                move_board[count] = move_type
                value = self.judge(move_board)
                if (value > max_value):
                    selects = [count]
                    max_value = value
                elif (value == max_value):
                    selects.append(count)
            count += 1
        select = random.sample(selects, 1)[0]
        move_board = self._board.copy()
        move_board[select] = move_type
        value = self.judge(move_board)
        self._state[str(self._board)] = self._state[str(self._board)] + self._alpha * (value - self._state[str(self._board)])  # update move
        self._board = move_board.copy()
        return select

    def play(self):
        self._board = [0] * 9
        self._end = False
        while not self._end:
            s1 = self.greedy_move()
            s2 = self.random_move()
            # print('greedy selection:', s1, 'random selection:', s2)

    def train(self, epoch=1000):
        for i in range(0, epoch):
            self.play()

    def dump_state(self):
        with open('learning.json', 'w') as f:
            f.write(json.dumps(self._state))

    def pretty_print_board(self):
        print(self._board[0], self._board[1], self._board[2])
        print(self._board[3], self._board[4], self._board[5])
        print(self._board[6], self._board[7], self._board[8])

    def combat(self):
        self._board = [0] * 9
        self._end = False
        while not self._end:
            s1 = self.greedy_move()
            self.pretty_print_board()
            print("Winning prob:", self.judge(self._board))
            if (self._end):
                print('You lose / a tie!')
                break
            s2 = input('Please enter your move: ')
            while self._board[int(s2)] != 0:
                s2 = input('Please enter your move: ')
            self._board[int(s2)] = -1
            self.pretty_print_board()
            print("Winning prob:", self.judge(self._board))
            self.judge(self._board)
            if (self._end):
                print('You win!')


if __name__ == '__main__':
    tttg = TTTGame()
    tttg.combat()
    tttg.train(100000)
    tttg.dump_state()
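# --- Illustrative sketch (added; not part of the original file) ---
# Both *_move methods above apply the classic temporal-difference value
# update V(s) <- V(s) + alpha * (V(s') - V(s)), with alpha = 0.05 and
# terminal values 1 (win), 0 (loss), 0.5 (draw). Isolated, the update is:
def td_update(value_s, value_s_next, alpha=0.05):
    return value_s + alpha * (value_s_next - value_s)

# Repeated updates pull V(s) toward the observed successor value:
v = 0.5
for _ in range(100):
    v = td_update(v, 1.0)   # successor turned out to be a winning state
print(round(v, 3))          # approaches 1.0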
27.037313
134
0.619928
576
3,623
3.71875
0.184028
0.134454
0.039216
0.071895
0.592437
0.562092
0.537815
0.523343
0.495798
0.471522
0
0.037847
0.205079
3,623
133
135
27.240602
0.705903
0.048027
0
0.425
0
0
0.042139
0
0
0
0
0
0
1
0.075
false
0
0.016667
0
0.166667
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fe6e5bdf88233acf9a9c841722eff52d327f1f2
13,160
py
Python
Server.py
HackintoshwithUbuntu/Python-Chat-App
d5af370e33a092c52702efed6b1074d458c593ac
[ "MIT" ]
2
2021-08-30T03:19:10.000Z
2021-09-06T21:51:02.000Z
Server.py
HackintoshwithUbuntu/Python-Chat-App
d5af370e33a092c52702efed6b1074d458c593ac
[ "MIT" ]
null
null
null
Server.py
HackintoshwithUbuntu/Python-Chat-App
d5af370e33a092c52702efed6b1074d458c593ac
[ "MIT" ]
null
null
null
# Imports
import socket     # Communication
import threading  # Communication with multiple users at once
import pickle     # Serialising data
import hashlib    # Hashing passwords
from Crypto.Cipher import AES  # AES encryption algorithms
from Crypto.Random import get_random_bytes  # For generating random keys and nonces

# A list of codes used in this program to prefix messages, so client knows their meaning
'''
 ______________________________________
|  CODE  |           MEANING           |
|______________________________________|
|   ?    | Signup                      |
|   !    | Signin                      |
|   $    | Control                     |
|   @    | Direct Message              |
|   ^    | Everyone Message            |
|   *    | Request list                |
|   +    | New user online             |
|   -    | User logged off             |
|   =    | Request pics dict           |
|   p    | New profile pic             |
|______________________________________|
'''

# A dictionary storing usernames and passwords
logins = {}
# dictionary mapping socket to username
record = {}
# dictionary mapping username to socket
records = {}
# dictionary mapping username to server key
keys = {}
# Dictionary storing profile pictures
pics = {}
# List to keep track of socket descriptors
connected_list = []
# A dictionary for working with logins (note: this is just so we can use the data in the file)
loginss = {}

# Starting the server socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Note: code skips to end, as these functions are not used until later

# A custom made function for sending double-layer encrypted data to clients
def send_to_client(clientsocket, message, key):
    # encrypt with our own key, they decrypt with ours
    # Serialising message so it can be encrypted
    msg = pickle.dumps(message)
    # Creating a new cipher
    cipher = AES.new(key, AES.MODE_EAX)
    # Ciphering the data
    # NOTE: WE ARE USING A RANDOMLY GENERATED NONCE, for second layer encryption
    ciphered_data, tag = cipher.encrypt_and_digest(msg)
    # Packing the data together and serialising it again so it can be sent
    tosend = [cipher.nonce, tag, ciphered_data]
    tosend = pickle.dumps(tosend)
    # Send packaged data
    clientsocket.send(tosend)
    return

# A custom function to receive client data, then decrypt, then verify
def client_receive(clientsocket, otherkey):
    # Receive data
    msg = clientsocket.recv(2048)
    # Making sure client hasn't disconnected
    if not msg:
        return "disconnect"
    else:
        # Separating packaged data
        msg = pickle.loads(msg)
        noonce = msg[0]
        tag = msg[1]
        data = msg[2]
        # Creating cipher for decryption
        cipher = AES.new(otherkey, AES.MODE_EAX, noonce)
        # Verifying integrity of data using a tag
        msg = cipher.decrypt_and_verify(data, tag)
        # Deserialising data
        msg = pickle.loads(msg)
        return msg

# A custom function for sending data to all clients, except sender
def send_all(sender, message):
    for i in connected_list:
        if i == sender:
            continue
        # Finding the socket
        receiversoc = records[i]
        # Send data using above function
        send_to_client(receiversoc, message, keys[i])

# A custom function for sending a message to all users
def msg_all(message, sender):
    # Constructing so client knows what this message is
    construct = "^" + sender + " " + message
    # Send data using above function
    send_all(sender, construct)

# A custom function for telling all clients about a new logon
def new_online(user):
    # Constructing
    construct = '+' + user
    # Sending to all using function
    send_all(user, construct)

# A custom function to check if a file exists without throwing errors
def file_exists(name):
    filename = name + ".txt"
    try:
        my_file = open(filename)
        my_file.close()
        return True
    except OSError:  # was a bare except; opening a missing file raises OSError
        return False

# A utility function to allow quick updating of saved passwords and profile pictures
def updatefile(name, obj):
    # Open file
    with open(name, 'wb+') as file:
        # Dump new data
        pickle.dump(obj, file)

# The main function for communicating with clients on a new thread
# This handles most work and messaging duties
# NOTE: this is run on one thread per client
def on_new_client(clientsocket, addr):
    # A string for storing username
    username = ''

    # Encryption handshake
    print("NETWORK: Attempting handshake with: " + addr[0] + ":" + str(addr[1]))
    # Generating a new COMPLETELY RANDOM key
    key = get_random_bytes(16)
    # Exchanging (not secure)
    clientsocket.send(key)
    # Receiving other key
    otherkey = clientsocket.recv(1024)
    # Printing it on console
    print("NETWORK: Server key: " + str(key) + ", " + str(addr[0]) + ":" + str(addr[1]) + " key:", str(otherkey))

    # Wrapped in try/except to detect logging off of users
    try:
        # Attempting sign in and sign up
        while True:
            # Receive data
            login = client_receive(clientsocket, otherkey)
            print("DEBUG: login / signup attempt", login)
            # Making sure the client hasn't disconnected
            if login == "disconnect":
                clientsocket.close()
                break
            # Splitting username and password, clients have already validated input
            user, passw = login[1:].split()
            passw = passw.encode("utf-8")
            # Hashing the password
            passw = hashlib.sha1(passw)
            # Storing hashed password in hex form
            passw = passw.hexdigest()
            print("DEBUG: Hashed password is: " + str(passw))
            # if sign up, else if login attempt
            if (login[0] == '?'):
                # Creating an account
                # If user hasn't already signed up
                if user not in loginss:
                    # Store username and password combo in memory
                    loginss[user] = passw
                    # Tell the client
                    send_to_client(clientsocket, "$success-signup", key)
                    # Give them default profile pic
                    pics[user] = 0
                    # Update relevant storage
                    updatefile("loginss.txt", loginss)
                    updatefile("pic.txt", pics)
                    print("USERS:", user, "signed up")
                else:
                    # Else tell them they failed
                    send_to_client(clientsocket, "$fail-signup", key)
                    print("USERS: Received failed signup")
                continue
            elif (login[0] == '!'):
                # Logging in
                # In a try/except to prevent key errors
                try:
                    if (loginss[user] == passw):
                        # This is a successful login
                        # Marking such on server
                        username = user
                        # Tell the client
                        send_to_client(clientsocket, "$success-login", key)
                        print("USERS:", username, "signed in")
                        break
                    else:
                        # Unsuccessful login
                        # Tell them they failed
                        send_to_client(clientsocket, "$fail-login", key)
                except KeyError:
                    # Key error: they need to sign up first
                    # Tell them they failed
                    send_to_client(clientsocket, "$fail-login", key)

        # Only if they have logged in successfully
        if (username != ''):
            # If they are not connected (should be almost always)
            if username not in connected_list:
                # mark their username as connected
                connected_list.append(username)
                # Tell clients about new profile picture and new client username
                send_all(username, "p" + str(pics[username]) + " " + username)
                new_online(username)
                print("USERS: Sent", username, "is online")

            # Record sockets and keys for easy access by utility functions
            record[clientsocket] = username
            records[username] = clientsocket
            keys[username] = key

            # Listen and act until told not to
            while True:
                # Receive using function
                msg = client_receive(clientsocket, otherkey)
                # Make sure client hasn't disconnected
                if msg == "disconnect":
                    # If they have, tell other clients and remove them from lists
                    connected_list.remove(username)
                    del keys[username]
                    clientsocket.close()
                    send_all("", "-" + username)
                    print("Users: " + username + " quit")
                    break
                # Interpreting commands from clients using codes from the table at the top
                if msg[0] == '@':
                    # Split message
                    recievername = msg[1:].split(" ", 1)
                    # Determine sockets and keys
                    receiversoc = records[recievername[0]]
                    reckey = keys[recievername[0]]
                    # Create message
                    tosend = "@" + username + " " + recievername[1]
                    print("MESSAGES: " + username + " SENT " + recievername[1] + " TO " + recievername[0])
                    # Send
                    send_to_client(receiversoc, tosend, reckey)
                elif msg[0] == '^':
                    # Determine sendername
                    sendername = record[clientsocket]
                    # Remove whitespace
                    tosend = msg[1:].strip()
                    print("MESSAGES: " + sendername + " SENT " + tosend + " TO ALL USERS")
                    # Send to all using function
                    msg_all(tosend, sendername)
                elif msg[0] == '*':
                    # If request connected list, send list
                    print("DEBUG:", username, "requested list")
                    send_to_client(clientsocket, connected_list, key)
                elif msg[0] == 'p':
                    # Determine sendername
                    sendername = record[clientsocket]
                    # Update memory list and file
                    pics[sendername] = msg[1]
                    updatefile("pic.txt", pics)
                    # Tell other clients of updated picture
                    send_all('', msg + " " + sendername)
                    print("USERS:", sendername, "changed their profile picture to:", msg[1])
                elif msg[0] == '=':
                    # If request pic dict, send pic dict
                    print("DEBUG:", username, "requested pics dict")
                    send_to_client(clientsocket, pics, key)
    except:
        # This is usually a logoff
        try:
            # This is when they are registered and logged in
            clientsocket.close()
            connected_list.remove(username)
            del keys[username]
            # Tell other clients
            send_all("", "-" + username)
            print("USERS: " + username + " quit")
        except:
            # If they aren't registered, the above code will have already closed the socket, so just record and quit
            print("USERS: Non-Authenticated user quit")

# Code skips to here
# Check if both files exist and populate memory with their contents if they do
# If they don't, set memory contents to empty and create the files
# Also log it at the end, so the server runner knows what just happened
if file_exists("loginss") == False:
    file = open("loginss.txt", "w+")
    file.close()
with open('loginss.txt', 'rb') as file:
    try:
        loginss = pickle.load(file)
    except:
        print("DEBUG: Failed reading file (the login file is probably empty, no need to worry)")
if file_exists("pic") == False:
    file = open("pic.txt", "w+")
    file.close()
with open('pic.txt', 'rb') as file:
    try:
        pics = pickle.load(file)
    except:
        print("DEBUG: Failed reading file (the pic file is probably empty, no need to worry)")

# Telling the host that it doesn't need to filter ips
host = ''
# Setting the port
port = 443
# Bind to the port
s.bind((host, port))
# Allow a backlog of up to ten queued connections
s.listen(10)

# Now wait for client connection.
print("DEBUG: Started on:", (host, port))
print("DEBUG: Ready for clients")

while True:
    # Blocking call, waits to accept a connection
    conn, addr = s.accept()
    # Log it
    print("NETWORK: Connected to " + addr[0] + ":" + str(addr[1]))
    # Start a new thread for the new client
    threading.Thread(target=on_new_client, args=(conn, addr)).start()
    print("\nDEBUG: Started new thread")

# Main thread continues the listening loop, assigning new threads to new clients
# In the rare case we get here, close down the server socket gracefully and then quit
s.close()
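# --- Illustrative sketch (added; not part of the original file) ---
# The wire format above is pickle([nonce, tag, ciphertext]) under AES-EAX.
# A minimal round-trip with PyCryptodome shows the same pattern in isolation:
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
import pickle

key = get_random_bytes(16)
cipher = AES.new(key, AES.MODE_EAX)
ct, tag = cipher.encrypt_and_digest(pickle.dumps("hello"))
packet = pickle.dumps([cipher.nonce, tag, ct])

nonce, tag, ct = pickle.loads(packet)
plain = pickle.loads(AES.new(key, AES.MODE_EAX, nonce=nonce).decrypt_and_verify(ct, tag))
assert plain == "hello"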
38.820059
115
0.572188
1,491
13,160
4.932931
0.262911
0.008973
0.016315
0.026105
0.13637
0.090551
0.076683
0.055201
0.034534
0.02828
0
0.005237
0.347036
13,160
339
116
38.820059
0.850692
0.336018
0
0.234637
0
0
0.104252
0
0
0
0
0
0
1
0.044693
false
0.039106
0.03352
0
0.106145
0.122905
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1fec0bf47c009cdb0ca6fac21df153c55c6c1431
46,269
py
Python
bot/utils/trackmania.py
NottCurious/TMIndiaBot
824c171fa2f41aa21631796c384f70a34a721364
[ "MIT" ]
1
2022-02-12T16:40:17.000Z
2022-02-12T16:40:17.000Z
bot/utils/trackmania.py
NottCurious/TMIndiaBot
824c171fa2f41aa21631796c384f70a34a721364
[ "MIT" ]
78
2021-10-14T05:32:54.000Z
2022-01-21T09:22:37.000Z
bot/utils/trackmania.py
NottCurious/TMIndiaBot
824c171fa2f41aa21631796c384f70a34a721364
[ "MIT" ]
null
null
null
import asyncio import json import os import shutil import typing from datetime import datetime, timezone, timedelta from matplotlib import pyplot as plt import cv2 import country_converter as coco import flag import requests import discord from bot.api import APIClient from bot.log import get_logger from bot.utils.commons import Commons from bot.utils.database import Database from bot.utils.discord import EZEmbed log = get_logger(__name__) class TrackmaniaUtils: """Functions relating to a specific Trackmania player who is given while creating the object""" def __init__(self, username: str): self.username = username self.api_client = APIClient() async def close(self): """Closes the API Client""" await self.api_client.close() return async def get_id(self) -> str: """Gets the ID of the Player from the API Raises: NotAValidUsername: If the username is not valid, this exception is raised. Returns: str: The ID of the player """ log.debug("Checking if the ID is in the file") id = Database.retrieve_id(self.username) if id is None: log.debug("Getting the data from the TMIndiaBotAPI") id_data = await self.api_client.get( f"http://localhost:3000/tm2020/player/{self.username}/id", raise_for_status=False, ) try: id = id_data["id"] except KeyError: id = None log.debug("Storing the Username and ID to the file") Database.store_id(self.username, id) else: log.debug("Username exists in file") return id async def get_player_data( self, player_id: str ) -> typing.Union[list, discord.Embed, None]: """Gets the player data as a list of embeds Page 1 contains the Zone, Zone Ranks and Metadata of the player Page 2 contains the Matchmaking and Royal Data Page 3 contains the individual trophy counts Args: player_id (str): The player's id Returns: typing.Union[list, discord.Embed, None]: The player data in a list of 3 embed. If the player does not exist, returns a single error embed. 
""" log.debug(f"Getting Data for {player_id}") raw_player_data = await self.api_client.get( f"http://localhost:3000/tm2020/player/{player_id}" ) log.debug("Getting Player Flag Unicode") player_flag_unicode = self._get_player_country_flag(raw_player_data) log.debug(f"Got Player Unicode flag -> {player_flag_unicode}") display_name = raw_player_data["displayname"] log.debug(f"Display Name is {display_name}") log.debug("Checking if Player has Played the Game") if raw_player_data["trophies"]["points"] == 0: return [ EZEmbed.create_embed( title=f"{player_flag_unicode} {display_name} has never played Trackmania 2020", color=0xFF0000, ) ] log.debug("Creating Two Embeds") page_one = EZEmbed.create_embed( title=f"Player Data for {player_flag_unicode} {display_name} - Page 1", color=Commons.get_random_color(), ) page_two = EZEmbed.create_embed( title=f"Player Data for {player_flag_unicode} {display_name} - Page 2", color=Commons.get_random_color(), ) page_three = EZEmbed.create_embed( title=f"Player Data for {player_flag_unicode} {display_name} - Page 3", color=Commons.get_random_color(), ) zones, zone_ranks = self._get_zones_and_positions(raw_player_data) royal_data = self._get_royal_data(raw_player_data) matchmaking_data = self._get_matchmaking_data(raw_player_data) trophy_count = self._get_trophy_count(raw_player_data) log.debug("Adding Zones and Zone Ranks to Page One") page_one.add_field(name="Zones", value=zones, inline=False) page_one.add_field(name="Zone Ranks", value=zone_ranks, inline=False) log.debug("Adding Matchmaking and Royal Data to Page Two") page_two.add_field(name="Matchmaking", value=matchmaking_data, inline=False) page_two.add_field(name="Royal", value=royal_data, inline=False) log.debug("Adding Trophy Count to Page Three") page_three.add_field(name="Trophy Count", value=trophy_count, inline=False) try: log.debug("Adding Meta Data to Page One") page_one = self._add_meta_details(page_one, raw_player_data) log.debug("Added Meta Data to Page One") except: log.debug("Player does not have Meta Data") log.debug(f"Returning {page_one}, {page_two} and {page_three}") return [page_one, page_two, page_three] async def get_cotd_data(self, user_id: str) -> discord.Embed: log.debug(f"Requesting COTD Data for {user_id} (Username: {self.username})") cotd_data = await self.api_client.get( f"http://localhost:3000/tm2020/player/{user_id}/cotd" ) try: if cotd_data["error"]: log.critical(f"{self.username} has never played a cotd") return ( EZEmbed.create_embed( title="This player has never played a COTD", colour=0xFF0000 ), None, ) except: pass log.debug("Parsing Best Rank Overall Data") best_rank_overall = COTDUtil.get_best_rank_overall(cotd_data) best_div_overall = COTDUtil.get_best_div_overall(cotd_data) best_div_rank_overall = COTDUtil.get_best_div_rank_overall(cotd_data) log.debug("Parsed Best Rank Overall Data") log.debug("Parsing Best Rank Primary Data") best_rank_primary = COTDUtil.get_best_rank_primary(cotd_data) best_div_primary = COTDUtil.get_best_div_primary(cotd_data) best_div_rank_primary = COTDUtil.get_best_div_rank_primary(cotd_data) log.debug("Parsed Best Rank Primary Data") log.debug("Parsing Average Rank Overall Data") average_rank_overall = COTDUtil.get_average_rank_overall(cotd_data) average_div_overall = COTDUtil.get_average_div_overall(cotd_data) average_div_rank_overall = COTDUtil.get_average_div_rank_overall(cotd_data) log.debug("Parsed Average Rank Overall Data") log.debug("Parsing Average Rank Primary Data") average_rank_primary = 
COTDUtil.get_average_rank_primary(cotd_data) average_div_primary = COTDUtil.get_average_div_primary(cotd_data) average_div_rank_primary = COTDUtil.get_average_div_rank_primary(cotd_data) log.debug("Parsed Average Rank Primary Data") log.debug("Creating Strings for Embed") best_data_overall = f"```Best Rank: {best_rank_overall}\nBest Div: {best_div_overall}\nBest Rank in Div: {best_div_rank_overall}\n```" best_data_primary = f"```Best Rank: {best_rank_primary}\nBest Div: {best_div_primary}\nBest Rank in Div: {best_div_rank_primary}\n```" average_data_overall = f"```Average Rank: {average_rank_overall}\nAverage Div: {average_div_overall}\nAverage Rank in Div: {average_div_rank_overall}\n```" average_data_primary = f"```Average Rank: {average_rank_primary}\nAverage Div: {average_div_primary}\nAverage Rank in Div: {average_div_rank_primary}\n```" log.debug("Created Strings for Embed") log.debug("Creating Embed Page") cotd_data_embed = EZEmbed.create_embed( title=f"COTD Data for {self.username}", color=Commons.get_random_color() ) log.debug("Created Embed Page") log.debug("Adding Fields") cotd_data_embed.add_field( name="Best Data Overall", value=best_data_overall, inline=False ) cotd_data_embed.add_field( name="Best Data Primary (No Reruns)", value=best_data_primary, inline=False ) cotd_data_embed.add_field( name="Average Data Overall", value=average_data_overall, inline=False ) cotd_data_embed.add_field( name="Average Data Primary (No Reruns)", value=average_data_primary, inline=False, ) log.debug("Added Fields") cotd_data_embed.set_footer( text="This function does not include COTDs where the player has left after the 15mins qualifying" ) log.debug("Getting Rank Data for Plots") ranks_overall = COTDUtil.get_list_of_ranks_overall(cotd_data) ranks_primary = COTDUtil.get_list_of_ranks_primary(cotd_data) log.debug("Getting IDs of Ranks for Plots") dates_overall = COTDUtil.get_list_of_dates_overall(cotd_data) dates_primary = COTDUtil.get_list_of_dates_primary(cotd_data) log.debug("Getting IDs for Plot") ids_overall = COTDUtil.get_list_of_ids_overall(cotd_data) ids_primary = COTDUtil.get_list_of_ids_primary(cotd_data) log.debug("Creating Plots for Ranks Overall and Ranks Primary") # Use Threading here log.debug("Creating Plot for Overall") COTDUtil._create_rank_plot( ranks=ranks_overall, dates=dates_overall, ids=ids_overall, plot_name="Overall Ranks (With Reruns)", image_name="overallranks", ) log.debug("Creating Plot for Primary") COTDUtil._create_rank_plot( ranks=ranks_primary, dates=dates_primary, ids=ids_primary, plot_name="Primary Rank Graph (No Reruns)", image_name="primaryranks", ) log.debug("Concatenating Both Graphs into One") COTDUtil._concat_graphs() log.debug("Opening Concatenated Graphs") image = discord.File( "./bot/resources/temp/concatenated_graphs.png", filename="concatenated_graphs.png", ) log.debug("Opened Concatenated graphs") log.debug("Adding the Image to the Embed") cotd_data_embed.set_image(url="attachment://concatenated_graphs.png") return cotd_data_embed, image def _get_player_country_flag(self, raw_player_data: dict): """Gets the country that the player is from as unicode characters""" log.debug("Getting Zones") try: zone_one = raw_player_data["trophies"]["zone"]["name"] zone_two = raw_player_data["trophies"]["zone"]["parent"]["name"] log.debug(f"Zones -> {zone_one} and {zone_two}") continents = ( "Asia", "Middle East", "Europe", "North America", "South America", "Africa", ) if zone_two in continents: log.debug("Only First Zone is Required") iso_letters = 
coco.convert(names=[zone_one], to="ISO2") unicode_letters = flag.flag(iso_letters) else: log.debug("Need to use Zone Two") iso_letters = coco.convert(names=[zone_two], to="ISO2") unicode_letters = flag.flag(iso_letters) log.debug(f"Unicode Letters are {unicode_letters}") return unicode_letters except: log.error("Player has never played Trackmania 2020") return ":flag_white:" def _get_royal_data(self, raw_player_data: dict) -> str: """Gets the royal data of the player as a string""" log.debug("Getting Player Data") try: royal_data = raw_player_data["matchmaking"][1] rank = royal_data["info"]["rank"] wins = royal_data["info"]["progression"] current_div = royal_data["info"]["division"]["position"] if wins != 0: progression_to_next_div = ( round( (wins - royal_data["info"]["division"]["minwins"]) / ( royal_data["info"]["division"]["maxwins"] - royal_data["info"]["division"]["minwins"] + 1 ), 4, ) * 100 ) else: log.debug("Player Has Not Won a Single Royal Match") progression_to_next_div = "0" log.debug( f"Creating Royal Data String with {rank}, {wins}, {current_div} and {progression_to_next_div}" ) royal_data_string = f"```Rank: {rank}\nWins: {wins}\nCurrent Division: {current_div}\nProgression to Next Division: {progression_to_next_div}%```" log.debug(f"Created Royal Data String -> {royal_data_string}") return royal_data_string except: return ( "An Error Occured While Getting Royal Data, Player has not played Royal" ) def _get_matchmaking_data(self, raw_player_data: dict) -> str: """Gets the matchmaking data of the player as a string""" log.debug("Getting Matchmaking Data") try: matchmaking_data = raw_player_data["matchmaking"][0] rank = matchmaking_data["info"]["rank"] score = matchmaking_data["info"]["score"] current_div = int(matchmaking_data["info"]["division"]["position"]) log.debug("Opening the MM Ranks File") with open( "./bot/resources/json/mm_ranks.json", "r", encoding="UTF-8" ) as file: mm_ranks = json.load(file) current_div = mm_ranks["rank_data"][str(current_div - 1)] log.debug("Calculating Progression to Next Division") progression_to_next_div = ( round( (score - matchmaking_data["info"]["division"]["minpoints"]) / ( matchmaking_data["info"]["division"]["maxpoints"] - matchmaking_data["info"]["division"]["minpoints"] + 1 ), 4, ) * 100 ) log.debug( f"Creating Matchmaking Data String with {rank}, {score}, {current_div}, {progression_to_next_div}" ) matchmaking_data_string = f"```Rank: {rank}\nScore: {score}\nCurrent Division: {current_div}\nProgression to Next Division: {progression_to_next_div}%```" log.debug(f"Created Matchmaking Data String -> {matchmaking_data_string}") return matchmaking_data_string except: log.error("Player has never Played Matchmaking") return "An error Occured While Getting Matchmaking Data, Player has not played Matchmaking" def _get_trophy_count(self, raw_player_data: dict) -> str: """The trophy counts as a string""" log.debug("Getting Trophy Counts") trophy_count_string = "```\n" log.debug("Adding Total Points") total_points = Commons.add_commas(raw_player_data["trophies"]["points"]) trophy_count_string += f"Total Points: {total_points}\n\n" log.debug(f"Added Total Points -> {total_points}") for i, trophy_count in enumerate(raw_player_data["trophies"]["counts"]): trophy_count_string = ( trophy_count_string + f"Trophy {i + 1}: {trophy_count}\n" ) trophy_count_string += "```" log.debug(f"Final Trophy Count -> {trophy_count_string}") return trophy_count_string def _get_zones_and_positions(self, raw_player_data) -> str: """ Converts raw_player_data into 
location and their ranks """ ranks_string = "" log.debug("Getting Zones") zone_one = raw_player_data["trophies"]["zone"]["name"] zone_two = raw_player_data["trophies"]["zone"]["parent"]["name"] zone_three = raw_player_data["trophies"]["zone"]["parent"]["parent"]["name"] try: zone_four = raw_player_data["trophies"]["zone"]["parent"]["parent"][ "parent" ]["name"] except: zone_four = "" log.debug(f"Got Zones -> {zone_one}, {zone_two}, {zone_three}, {zone_four}") log.debug("Getting Position Data") raw_zone_positions = raw_player_data["trophies"]["zonepositions"] zone_one_position = raw_zone_positions[0] zone_two_position = raw_zone_positions[1] zone_three_position = raw_zone_positions[2] if zone_four != "": zone_four_position = raw_zone_positions[3] else: zone_four_position = -1 log.debug("Got Position Data") log.debug("Making string for position data") ranks_string = "```\n" ranks_string += f"{zone_one} - {zone_one_position}\n" ranks_string += f"{zone_two} - {zone_two_position}\n" ranks_string += f"{zone_three} - {zone_three_position}\n" if zone_four != "": ranks_string += f"{zone_four} - {zone_four_position}\n" ranks_string += "```" log.debug(f"Final Ranks String is {ranks_string}") log.debug("Creating Zones String") zones_string = f"```\n{zone_one}, {zone_two}, {zone_three}" if zone_four != "": zones_string += f", {zone_four}" zones_string += "\n```" return zones_string, ranks_string def _add_meta_details( self, player_page: discord.Embed, raw_player_data: dict, ) -> discord.Embed: """Adds the Metadata of a player to the first page of the embed Args: player_page (discord.Embed): the first page of player details raw_player_data (dict): player data from the api Returns: discord.Embed: First page of the embed after metadata has been added """ log.debug("Adding Meta Details for Player") meta_data = raw_player_data["meta"] try: log.debug("Checking if Player has Twitch") twitch_name = meta_data["twitch"] player_page.add_field( name="[<:twitch:895250576751853598>] Twitch", value=f"[{twitch_name}](https://twitch.tv/{twitch_name})", inline=True, ) log.debug("Twitch Added for Player") except: log.debug("Player does not have a Twitch Account Linked to TMIO") try: log.debug("Checking if Player has Twitter") twitter_name = meta_data["twitter"] player_page.add_field( name="[<:twitter:895250587157946388>] Twitter", value=f" [{twitter_name}](https://twitter.com/{twitter_name})", inline=True, ) log.debug("Twitter Added for Player") except: log.debug("Player does not have a Twitter Account Linked to TMIO") try: log.debug("Checking if Player has YouTube") youtube_link = meta_data["youtube"] player_page.add_field( name="[<:youtube:895250572599513138>] YouTube", value=f"[YouTube](https://youtube.com/channel/{youtube_link})", inline=True, ) log.debug("YouTube Added for Player") except: log.debug("Player does not have a YouTube Account Linked to TMIO") log.debug("Adding TMIO") display_name = raw_player_data["displayname"] player_id = raw_player_data["accountid"] player_page.add_field( name="TMIO", value=f"[{display_name}](https://trackmania.io/#/player/{player_id})", ) try: log.debug("Checking if TMGL Player") if meta_data["tmgl"] is True: player_page.add_field( name="TMGL", value="This Player Participates in TMGL", inline=True ) log.debug("Added TMGL Field") except: log.debug("Player does not participate in TMGL") log.debug("Added TMIO Link") log.debug(f"Returning {player_page}") return player_page class TOTDUtils: @staticmethod def _download_thumbail(url: str) -> None: """Downloads the Thumbnail from Nadeo's 
API and stores in `./bot/resources/temp/totd.png`""" if os.path.exists("./bot/resources/temp/totd.png"): log.debug("Thumbnail already downloaded") return req = requests.get(url, stream=True) if req.status_code == 200: log.debug("Image was retrieved succesfully") req.raw.decode_content = True log.debug("Saving Image to File") with open("./bot/resources/temp/totd.png", "wb") as file: shutil.copyfileobj(req.raw, file) else: log.critical("Image could not be retrieved") @staticmethod def _parse_mx_tags(self, tags: str) -> str: """Parses Maniaexchange tags to their strings Args: tags (str): The tags as a string of `ints` Returns: str: The tags as a string of `strings` """ log.debug(f"Tags -> {tags}") log.debug("Removing Spaces") tags.replace(" ", "") log.debug(f"Without Spaces -> {tags}") tags = tags.split(",") tag_string = "" with open("./bot/resources/json/mxtags.json", "r") as file: mxtags = json.load(file)["mx"] for i, tag in enumerate(tags): log.debug(f"Converting {tag}") for j in range(len(mxtags)): if int(tag) == int(mxtags[j]["ID"]): tag_string = tag_string + mxtags[j]["Name"] + ", " log.debug(f"Tag String -> {tag_string}") return tag_string[:-2] @staticmethod async def today(): """The data of the current day's totd""" log.info("Creating an API Client") api_client = APIClient() log.info("Created an API Client") log.debug("Getting TOTD Data from API") totd_data = await api_client.get("http://localhost:3000/tm2020/totd/latest") log.debug("Parsing TOTD Data") map_name = totd_data["name"] author_name = totd_data["authorplayer"]["name"] thumbnail_url = totd_data["thumbnailUrl"] author_time = Commons.format_seconds(int(totd_data["authorScore"])) gold_time = Commons.format_seconds(int(totd_data["goldScore"])) silver_time = Commons.format_seconds(int(totd_data["silverScore"])) bronze_time = Commons.format_seconds(int(totd_data["bronzeScore"])) nadeo_uploaded = totd_data["timestamp"] wr_holder = totd_data["leaderboard"]["tops"][0]["player"]["name"] wr_time = Commons.format_seconds( int(totd_data["leaderboard"]["tops"][0]["time"]) ) tmio_id = totd_data["mapUid"] log.debug("Parsed TOTD Data") log.debug("Parsing Download Link") download_link = totd_data["fileUrl"] log.debug("Parsed Download Link") log.debug("Parsing Time Uploaded to Timestamp") nadeo_timestamp = ( datetime.strptime(nadeo_uploaded[:-6], "%Y-%m-%dT%H:%M:%S") .replace(tzinfo=timezone.utc) .timestamp() ) log.debug("Parsed Time Uploaded to Timestamps") log.debug("Creating Strings from Parsed Data") medal_times = f"<:author:894268580902883379> {author_time}\n<:gold:894268580970004510> {gold_time}\n<:silver:894268580655411220> {silver_time}\n<:bronze:894268580181458975> {bronze_time}" world_record = f"Holder: {wr_holder}\nTime: {wr_time}" nadeo_uploaded = f"<t:{int(nadeo_timestamp)}:R>" log.debug("Created Strings from Parsed Data") log.debug( "Getting Map Thumbnail\nChecking if map Thumbnail has Already been Downloaded" ) if not os.path.exists("./bot/resources/temp/totd.png"): log.critical("Map Thumbail has not been downloaded") TOTDUtils._download_thumbail(thumbnail_url) log.debug("Parsing TM Exchange Data") try: mania_tags = totd_data["exchange"]["Tags"] mx_uploaded = totd_data["exchange"]["UploadedAt"] tmx_code = totd_data["exchange"]["TrackID"] try: mx_dt = datetime.strptime(mx_uploaded[:-3], "%Y-%m-%dT%H:%M:%S") except ValueError: mx_dt = datetime.strptime(mx_uploaded[:-4], "%Y-%m-%dT%H:%M:%S") mx_timestamps = mx_dt.replace(tzinfo=timezone.utc).timestamp() mx_uploaded = f"<t:{int(mx_timestamps)}:R>" except: log.critical("Map has never 
been uploaded to trackmania.exchange") log.debug("Creating Embed") current_day = datetime.now(timezone(timedelta(hours=5, minutes=30))).strftime( "%d" ) current_month = datetime.now(timezone(timedelta(hours=5, minutes=30))).strftime( "%B" ) # Add Day Suffix if int(current_day) % 10 == 1: day_suffix = "st" elif int(current_day) % 10 == 2: day_suffix = "nd" elif int(current_day) % 10 == 3: day_suffix = "rd" else: day_suffix = "th" embed = EZEmbed.create_embed( title=f"Here is the {current_day}{day_suffix} {current_month} TOTD", color=Commons.get_random_color(), ) log.debug("Creating Image File") image = discord.File("./bot/resources/temp/totd.png", filename="totd.png") embed.set_image(url="attachment://totd.png") embed.add_field(name="Map Name", value=map_name, inline=False) embed.add_field(name="Author", value=author_name, inline=True) try: embed.add_field( name="Tags", value=TOTDUtils._parse_mx_tags(mania_tags), inline=False ) except: pass embed.add_field( name="Time Uploaded to Nadeo server", value=nadeo_uploaded, inline=False ) try: embed.add_field(name="Time Uploaded to TMX", value=mx_uploaded, inline=True) except: pass embed.add_field(name="Medal Times", value=medal_times, inline=False) embed.add_field(name="Word record", value=world_record, inline=False) tmio_link = f"https://trackmania.io/#/leaderboard/{tmio_id}" try: tmx_link = f"https://trackmania.exchange/maps/{tmx_code}/" except: tmx_link = None log.debug("Created Embed") log.info("Closing the API Client") await api_client.close() log.info("Closed the API Embed") return embed, image, download_link, tmio_link, tmx_link class Leaderboards: @staticmethod def get_campaign_ids(year: str = "2021", season: str = "Fall") -> list[str]: """Gets a list of all campaign ids for a given year and season Args: year (str, optional): The year of the season. Defaults to "2021". season (str, optional): The season itself. Defaults to "Fall". Returns: list[str]: List of ids """ log.debug(f"Opening {year}/{season.lower()} Data File") with open( f"./bot/resources/json/campaign/{year}/{season.lower()}.json", "r", encoding="UTF-8", ) as file: file_data = json.load(file) id_list = file_data["ids"] log.debug("Not Ignoring First Five Maps") return id_list @staticmethod async def update_campaign_leaderboards( id_list: list[str], year: str = "2021", season: str = "Fall", skip_first_five: bool = False, ): """Updates the leaderboard files for the campaign Args: id_list (list[str]): Campaign map id list year (str, optional): The year of the season. Defaults to "2021" season (str, optional): The season itself. Defaults to "Fall". 
""" log.info("Creating APIClient for Updating Campaign Leaderboards") api_client = APIClient() log.info("Created APIClient for Updating Campaign Leaderboards") for i, id in enumerate(id_list): leaderboard_data = [] log.debug("Getting Data from API") leaderboard_data = await api_client.get( f"http://localhost:3000/tm2020/leaderboard/{id}/5" ) log.debug("Got Data from API") with open( f"./bot/resources/leaderboard/{year}/{season.lower()}/{i + 1}.json", "w", encoding="UTF-8", ) as file: log.debug(f"Dumping Data to File -> {year}>{season}>{i+1}") json.dump(leaderboard_data, file, indent=4) log.debug("Sleeping for 10s") # time.sleep(10) await asyncio.sleep(10) log.debug(f"Finished Map #{i + 1}") await api_client.close() @staticmethod def get_player_list(map_no: str, year: str = "2021", season: str = "Fall"): log.debug(f"Opening File, Map No -> {map_no}") with open( f"./bot/resources/leaderboard/{year}/{season.lower()}/{map_no}.json", "r", encoding="UTF-8", ) as file: data = json.load(file) player_list = [] log.debug("Appending Players") for player in data: player_list.append((player["player"]["name"], player["position"])) return player_list @staticmethod def get_player_good_maps( player_name: str, year: str = "2021", season: str = "Fall" ) -> discord.Embed: log.debug(f"Getting Player Details for Player name -> {player_name}") player_embed = EZEmbed.create_embed( title=f"{player_name} is good at the following maps", color=Commons.get_random_color(), ) t100_str, t200_str, t300_str, t400_str, t500_str = "", "", "", "", "" for i in range(6, 26, 1): player_list = Leaderboards.get_player_list(str(i), year, season.lower()) for player_tuple in player_list: if player_tuple[0].lower() == player_name.lower(): if int(player_tuple[1]) <= 100: log.debug(f"{player_name} is a top 100 player for Map {i}") t100_str = ( t100_str + str(i) + " - " + str(player_tuple[1]) + "\n" ) elif int(player_tuple[1]) <= 200 and int(player_tuple[1]) > 100: log.debug(f"{player_name} is a top 200 player for Map {i}") t200_str = ( t200_str + str(i) + " - " + str(player_tuple[1]) + "\n" ) elif int(player_tuple[1]) <= 300 and int(player_tuple[1]) > 200: log.debug(f"{player_name} is a top 300 player for Map {i}") t300_str = ( t300_str + str(i) + " - " + str(player_tuple[1]) + "\n" ) elif int(player_tuple[1]) <= 400 and int(player_tuple[1]) > 300: log.debug(f"{player_name} is a top 400 player for Map {i}") t400_str = ( t400_str + str(i) + " - " + str(player_tuple[1]) + "\n" ) elif int(player_tuple[1]) <= 500 and int(player_tuple[1]) > 400: log.debug(f"{player_name} is a top 500 player for Map {i}") t500_str = ( t500_str + str(i) + " - " + str(player_tuple[1]) + "\n" ) if t100_str != "": log.debug(f"Appending T100 String for {player_name}") player_embed.add_field( name="**Top 100**", value="```" + t100_str + "```", inline=False ) else: log.debug("Player does not have any top 100 ranks") player_embed.add_field( name="**Top 100**", value="Player does not have any top 100 times for maps 06-25", inline=False, ) if t200_str != "": log.debug(f"Appending Top 100 String for {player_name}") player_embed.add_field( name="**Top 200**", value="```" + t200_str + "```", inline=False ) else: log.debug("Player does not have any top 200 ranks") player_embed.add_field( name="**Top 200**", value="Player does not have any top 200 times for maps 06-25", inline=False, ) if t300_str != "": log.debug(f"Appending Top 100 String for {player_name}") player_embed.add_field( name="**Top 300**", value="```" + t300_str + "```", inline=False ) else: log.debug("Player 
does not have any top 300 ranks") player_embed.add_field( name="**Top 300**", value="Player does not have any top 300 times for maps 06-25", inline=False, ) if t400_str != "": log.debug(f"Appending Top 100 String for {player_name}") player_embed.add_field( name="**Top 400**", value="```" + t400_str + "```", inline=False ) else: log.debug("Player does not have any top 400 ranks") player_embed.add_field( name="**Top 400**", value="Player does not have any top 400 times for maps 06-25", inline=False, ) if t500_str != "": log.debug(f"Appending Top 100 String for {player_name}") player_embed.add_field( name="**Top 500**", value="```" + t500_str + "```", inline=False ) else: log.debug("Player does not have any top 500 ranks") player_embed.add_field( name="**Top 500**", value="Player does not have any top 500 times for maps 06-25", inline=False, ) return player_embed class COTDUtil: @staticmethod def get_best_rank_primary(cotd_data) -> int: log.debug( "Getting Best Primary Best Rank -> {}".format( cotd_data["stats"]["bestprimary"]["bestrank"] ) ) return cotd_data["stats"]["bestprimary"]["bestrank"] @staticmethod def get_best_div_primary(cotd_data) -> int: log.debug( "Getting Primary Best Div -> {}".format( cotd_data["stats"]["bestprimary"]["bestdiv"] ) ) return cotd_data["stats"]["bestprimary"]["bestdiv"] @staticmethod def get_best_rank_primary_time(cotd_data) -> int: log.debug( "Getting the time of Primary Best -> {}".format( cotd_data["stats"]["bestprimary"]["bestranktime"] ) ) return cotd_data["stats"]["bestprimary"]["bestranktime"] @staticmethod def get_best_div_primary_time(cotd_data) -> int: log.debug( "Getting the time of Primary Best Div -> {}".format( cotd_data["stats"]["bestprimary"]["bestdivtime"] ) ) return cotd_data["stats"]["bestprimary"]["bestdivtime"] @staticmethod def get_best_div_rank_primary(cotd_data) -> int: log.debug( "Getting the Best Rank in Div -> {}".format( cotd_data["stats"]["bestprimary"]["bestrankindiv"] ) ) return cotd_data["stats"]["bestprimary"]["bestrankindiv"] @staticmethod def get_best_rank_overall(cotd_data) -> int: log.debug( "Getting the Overall Best Rank -> {}".format( cotd_data["stats"]["bestoverall"]["bestrank"] ) ) return cotd_data["stats"]["bestoverall"]["bestrank"] @staticmethod def get_best_div_overall(cotd_data) -> int: log.debug( "Getting the Overall Best Div -> {}".format( cotd_data["stats"]["bestoverall"]["bestdiv"] ) ) return cotd_data["stats"]["bestoverall"]["bestdiv"] @staticmethod def get_best_rank_overall_time(cotd_data) -> int: log.debug( f'Getting the time of Overall Best Rank -> {cotd_data["stats"]["bestoverall"]["bestranktime"]}' ) return cotd_data["stats"]["bestoverall"]["bestranktime"] @staticmethod def get_best_div_overall_time(cotd_data) -> int: log.debug( "Getting the time of Overall Best Div -> {}".format( cotd_data["stats"]["bestoverall"]["bestdivtime"] ) ) return cotd_data["stats"]["bestoverall"]["bestdivtime"] @staticmethod def get_best_div_rank_overall(cotd_data) -> int: log.debug( "Getting the Best Rank in Div Overall -> {}".format( cotd_data["stats"]["bestoverall"]["bestrankindiv"] ) ) return cotd_data["stats"]["bestoverall"]["bestrankindiv"] @staticmethod def return_cotds(cotd_data): log.debug("Returning all COTDs") return cotd_data["cotds"] @staticmethod def return_cotds_without_reruns(cotd_data): log.debug("Returning COTDs without reruns") cotds_safe = [] for cotd in cotd_data["cotds"]: if "#2" in cotd["name"] or "#3" in cotd["name"]: continue cotds_safe.append(cotd) return cotds_safe @staticmethod def 
get_num_cotds_played(cotds): log.debug(f"Number of COTDs Played -> {len(cotds)}") return len(cotds) @staticmethod def remove_unfinished_cotds(cotds): log.debug("Looping around COTDs") cotds_safe = [] for cotd in cotds: if not cotd["score"] == 0: cotds_safe.append(cotd) log.debug(f"{len(cotds_safe)} COTDs Finished out of Given Set") return cotds_safe @staticmethod def get_average_rank_overall(cotd_data): cotds = COTDUtil.return_cotds(cotd_data) cotds_played = COTDUtil.get_num_cotds_played(cotds) rank_total = 0 # Looping Through COTDs for cotd in cotds: rank_total += int(cotd["rank"]) log.debug(f"Average Rank Overall -> {round(rank_total / cotds_played, 2)}") return round(rank_total / cotds_played, 2) @staticmethod def get_average_rank_primary(cotd_data): cotds = COTDUtil.return_cotds_without_reruns(cotd_data) cotds_played = COTDUtil.get_num_cotds_played(cotds) rank_total = 0 for cotd in cotds: rank_total += int(cotd["rank"]) try: log.debug(f"Average Rank Primary -> {round(rank_total / cotds_played, 2)}") return round(rank_total / cotds_played, 2) except: log.debug("Average Rank Primary -> 0") return 0 @staticmethod def get_average_div_overall(cotd_data): cotds = COTDUtil.return_cotds(cotd_data) cotds_played = COTDUtil.get_num_cotds_played(cotds) div_total = 0 # Looping Through COTDs for cotd in cotds: div_total += int(cotd["div"]) log.debug(f"Average Div Overall -> {round(div_total / cotds_played, 2)}") return round(div_total / cotds_played, 2) @staticmethod def get_average_div_primary(cotd_data): cotds = COTDUtil.return_cotds_without_reruns(cotd_data) cotds_played = COTDUtil.get_num_cotds_played(cotds) div_total = 0 for cotd in cotds: div_total += int(cotd["div"]) try: log.debug(f"Average Div Primary -> {round(div_total / cotds_played, 2)}") return round(div_total / cotds_played, 2) except: log.debug("Average Div Primary -> 0") return 0 @staticmethod def get_average_div_rank_overall(cotd_data): cotds = COTDUtil.return_cotds(cotd_data) cotds_played = COTDUtil.get_num_cotds_played(cotds) div_rank_total = 0 for cotd in cotds: div_rank_total += int(cotd["div"]) log.debug( f"Average Div Rank Overall -> {round(div_rank_total / cotds_played, 2)}" ) return round(div_rank_total / cotds_played, 2) @staticmethod def get_average_div_rank_primary(cotd_data): cotds = COTDUtil.return_cotds_without_reruns(cotd_data) cotds_played = COTDUtil.get_num_cotds_played(cotds) div_rank_total = 0 for cotd in cotds: div_rank_total += int(cotd["divrank"]) try: log.debug( f"Average Div Rank Primary -> {round(div_rank_total / cotds_played, 2)}" ) return round(div_rank_total / cotds_played, 2) except: log.debug("Average Div Rank Primary -> 0") return 0 @staticmethod def get_list_of_ranks_overall(cotd_data): cotds = COTDUtil.return_cotds(cotd_data) cotds = COTDUtil.remove_unfinished_cotds(cotds) ranks = [] for cotd in cotds: ranks.append(cotd["rank"]) log.debug(f"Ranks are {ranks[::-1]}") return ranks[::-1] @staticmethod def get_list_of_ranks_primary(cotd_data): cotds = COTDUtil.return_cotds_without_reruns(cotd_data) cotds = COTDUtil.remove_unfinished_cotds(cotds) ranks = [] for cotd in cotds: ranks.append(cotd["rank"]) log.debug(f"Ranks are {ranks[::-1]}") return ranks[::-1] @staticmethod def get_list_of_dates_overall(cotd_data): cotds = COTDUtil.return_cotds(cotd_data) cotds = COTDUtil.remove_unfinished_cotds(cotds) timestamps = [] for cotd in cotds: timestamps.append(cotd["name"][15:]) log.debug(f"Timestamps are {timestamps[::-1]}") return timestamps[::-1] @staticmethod def get_list_of_dates_primary(cotd_data): cotds = 
COTDUtil.return_cotds_without_reruns(cotd_data) cotds = COTDUtil.remove_unfinished_cotds(cotds) timestamps = [] for cotd in cotds: timestamps.append(cotd["name"][15:]) log.debug(f"Timestamps are {timestamps[::-1]}") return timestamps[::-1] @staticmethod def get_list_of_ids_overall(cotd_data): cotds = COTDUtil.return_cotds(cotd_data) cotds = COTDUtil.remove_unfinished_cotds(cotds) ids = [] for cotd in cotds: ids.append(cotd["id"]) log.debug(f"IDs are {ids[::-1]}") return ids[::-1] @staticmethod def get_list_of_ids_primary(cotd_data): cotds = COTDUtil.return_cotds_without_reruns(cotd_data) cotds = COTDUtil.remove_unfinished_cotds(cotds) ids = [] for cotd in cotds: ids.append(cotd["id"]) log.debug(f"IDs are {ids[::-1]}") return ids[::-1] @staticmethod def get_num_wins(cotd_data): log.debug( "Getting number of wins -> {}".format(cotd_data["stats"]["totalwins"]) ) return cotd_data["stats"]["totalwins"] @staticmethod def _create_rank_plot( ranks: list, dates: list, ids: list, plot_name: str, image_name: str ): log.debug("Clearing Plot") plt.clf() if len(dates) >= 40: log.debug( f"{plot_name} -> Player has played more than 40 COTDs, using ids instead of dates" ) plt.plot(ids, ranks, label=plot_name) plt.xlabel("COTD IDs") else: log.debug( f"{plot_name} -> Player has less than 40 COTDs, using dates instead of ids" ) plt.plot(dates, ranks, label=plot_name) plt.xlabel("COTD Dates") log.debug(f"{plot_name} -> Setting Plot Rotation to 90Deg") plt.xticks(rotation=90) log.debug(f"{plot_name} -> Setting YLabel to Ranks") plt.ylabel("Ranks") log.debug(f"{plot_name} -> Setting title to {plot_name}") plt.title(f"Rank Graph for {plot_name}") log.debug(f"{plot_name} -> Setting Tight Layout") plt.tight_layout() log.debug(f"{plot_name} -> Saving the Plot to Computer") plt.savefig("./bot/resources/temp/" + image_name) @staticmethod def _concat_graphs(): log.info("Concatenating Graphs") log.debug("Opening First Graph") first_graph = cv2.imread("./bot/resources/temp/overallranks.png") log.debug("First Graph Opened") log.debug("Opening Second Graph") second_graph = cv2.imread("./bot/resources/temp/primaryranks.png") log.debug("Second Graph Opened") log.debug("Concatenating Graphs") concatenated_graphs = cv2.hconcat([first_graph, second_graph]) log.debug("Concatenated Graphs") log.info("Saving Graphs") cv2.imwrite("./bot/resources/temp/concatenated_graphs.png", concatenated_graphs) class NotAValidUsername(Exception): """Raised when the Username given is not valid. Args: Exception ([type]): [description] """ def __init__(self, excp: Exception): self.message = excp.message def __str__(self): return self.message if self.message is not None else None
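
# --- Usage sketch (not part of the original file) ---
# A minimal illustration of how the COTDUtil helpers consume the TMIO-style
# payload. `sample_cotd_data` is hypothetical; only the keys the helpers
# actually read ("cotds", "name", "rank", "div", "divrank", "score") are filled in.
if __name__ == "__main__":
    sample_cotd_data = {
        "cotds": [
            {"id": 1, "name": "Cup of the Day 2021-10-01", "rank": 120, "div": 2, "divrank": 56, "score": 41000},
            {"id": 2, "name": "Cup of the Day 2021-10-02 #2", "rank": 300, "div": 5, "divrank": 44, "score": 39000},
        ],
    }
    # the "#2" rerun is filtered out by return_cotds_without_reruns()
    print(COTDUtil.get_average_rank_overall(sample_cotd_data))  # (120 + 300) / 2 = 210.0
    print(COTDUtil.get_average_rank_primary(sample_cotd_data))  # only the non-rerun COTD -> 120.0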
# ---- src/vehicle_core/model/throttle_model.py | decabyte/vehicle_core | Python | BSD-3-Clause ----
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of the Heriot-Watt University nor the names of
#    its contributors may be used to endorse or promote products
#    derived from this software without specific prior written
#    permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Original authors:
#   Valerio De Carolis, Marian Andrecki, Corina Barbalata, Gordon Frost

from __future__ import division

import numpy as np
import scipy as sci
import scipy.signal


##pythran export predict_throttle(float[], float[], float[], float, float)
def predict_throttle(throttle_request, b, a, offset, limit):
    """This function returns the predicted throttle for each thruster given a
    throttle request, using low-pass IIR filtering. See
    http://en.wikipedia.org/wiki/Infinite_impulse_response for more details.

    The use of scipy is not possible if the pythran optimizer is employed with this module.

    :param throttle_request: matrix of throttle request (N x M) (rows are different thrusters and columns are samples)
    :param b: low-pass filter b coefficients
    :param a: low-pass filter a coefficients
    :param offset: samples offset in the throttle request
    :param limit: throttle value hard limit
    :return: throttle_model is the predicted value of the throttle
    """
    # apply latency delay (offset is positive)
    throttle_delayed = throttle_request[:, 0:-(offset + 1)]
    throttle_model = np.zeros_like(throttle_delayed)

    # apply low-pass filter (using scipy)
    throttle_model = sci.signal.lfilter(b, a, throttle_delayed)

    # # apply low-pass filter (using custom implementation)
    # P = len(b)
    # Q = len(a)
    # N = throttle_delayed.shape[0]
    # M = throttle_delayed.shape[1]
    # K = np.maximum(P, Q)
    #
    # for i in xrange(N):
    #     for j in xrange(K, M):
    #         x = throttle_delayed[i, j-P:j]
    #         y = throttle_model[i, j-Q:j-1]
    #         throttle_model[i, j] = (np.sum(b[::-1] * x) - np.sum(a[:0:-1] * y)) / a[0]

    # calculate the result and apply limits
    return np.clip(throttle_model[:, -1], -limit, limit)


##pythran export rate_limiter(float[], float[], float, float)
def rate_limiter(new_throttle, last_throttle, rising_limit, falling_limit):
    """Models the change in thruster's throttle.

    http://www.mathworks.co.uk/help/simulink/slref/ratelimiter.html

    :param last_throttle: result of a previous iteration
    :param new_throttle: requested throttle for the current iteration
    :param rising_limit: rising rate limit between two samples
    :param falling_limit: falling rate limit between two samples
    :return: next_throttle: the new throttle after applying rate limits
    """
    diff_throttle = new_throttle - last_throttle
    next_throttle = np.zeros_like(new_throttle)

    for i, dth in enumerate(diff_throttle):
        if dth > rising_limit:
            next_throttle[i] = last_throttle[i] + rising_limit
        elif dth < -falling_limit:
            next_throttle[i] = last_throttle[i] - falling_limit
        else:
            next_throttle[i] = new_throttle[i]

    return next_throttle
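
# --- Usage sketch (not part of the original file) ---
# A minimal example of driving the two functions above. The Butterworth
# coefficients, the 4x50 request matrix, and all numeric values below are
# illustrative assumptions, not values from any vehicle configuration.
if __name__ == '__main__':
    # second-order low-pass filter with a normalized cutoff of 0.2
    b, a = scipy.signal.butter(2, 0.2)

    # 4 thrusters, 50 samples of a step request from 0 to 60 throttle units
    request = np.zeros((4, 50))
    request[:, 10:] = 60.0

    predicted = predict_throttle(request, b, a, offset=2, limit=85.0)
    print('predicted throttle per thruster:', predicted)

    # rate-limit the step between two consecutive commands
    limited = rate_limiter(request[:, -1], request[:, 0], rising_limit=5.0, falling_limit=5.0)
    print('rate-limited throttle:', limited)  # capped at 0 + 5.0 per sample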
# ---- social_friends_finder/backends/vkontakte_backend.py | haremmaster/django-social-friends-finder | Python | BSD-3-Clause ----
from social_friends_finder.backends import BaseFriendsProvider
from social_friends_finder.utils import setting

if not setting("SOCIAL_FRIENDS_USING_ALLAUTH", False):
    from social_auth.backends.contrib.vk import VKOAuth2Backend
    USING_ALLAUTH = False
else:
    from allauth.socialaccount.models import SocialToken, SocialAccount, SocialApp
    USING_ALLAUTH = True

import vkontakte


class VKontakteFriendsProvider(BaseFriendsProvider):

    def fetch_friends(self, user):
        """Fetches friends from VKontakte using the access_token fetched by django-social-auth.

        Note - `user` isn't a User - it's a UserSocialAuth if using social auth,
        or a SocialAccount if using allauth.

        Returns:
            collection of friend objects fetched from VKontakte
        """
        if USING_ALLAUTH:
            raise NotImplementedError("VKontakte support is not implemented for django-allauth")
            #social_app = SocialApp.objects.get_current('vkontakte')
            #oauth_token = SocialToken.objects.get(account=user, app=social_app).token
        else:
            social_auth_backend = VKOAuth2Backend()

            # Get the access_token
            tokens = social_auth_backend.tokens(user)
            oauth_token = tokens['access_token']

        api = vkontakte.API(token=oauth_token)
        return api.get("friends.get")

    def fetch_friend_ids(self, user):
        """Fetches friend ids from VKontakte.

        Returns:
            collection of friend ids
        """
        friend_ids = self.fetch_friends(user)
        return friend_ids
# ---- dial/metrics.py | neukg/KAT-TSLF | Python | MIT ----
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
from nltk import word_tokenize
# import language_evaluation  # required by calc_rouge(); left commented out upstream
from typing import List
from collections import defaultdict, Counter
import re
import math
import sys


def mean(lst):
    return sum(lst) / len(lst)


def _calc_ngram_dict(tokens: List[str], ngram: int, dict_ref=None):
    ngram_dict = defaultdict(int) if dict_ref is None else dict_ref
    total = len(tokens)
    for i in range(0, total - ngram + 1):
        item = tuple(tokens[i:i + ngram])
        ngram_dict[item] += 1
    return ngram_dict


def _calc_cover(cand, gold, ngram):
    cand_dict = _calc_ngram_dict(cand, ngram)
    gold_dict = _calc_ngram_dict(gold, ngram)
    cover = 0
    total = 0
    for token, freq in cand_dict.items():
        if token in gold_dict:
            cover += min(freq, gold_dict[token])
        total += freq
    return cover, total


def _calc_cover_rate(cands, golds, ngram):
    """calc_cover_rate"""
    cover = 0.0
    total = 0.000001
    for cand_tokens, gold_tokens in zip(cands, golds):
        cur_cover, cur_total = _calc_cover(cand_tokens, gold_tokens, ngram)
        cover += cur_cover
        total += cur_total
    return cover / total


def _calc_bp(cands, golds):
    c_count = 0.000001
    r_count = 0.0
    for cand_tokens, gold_tokens in zip(cands, golds):
        c_count += len(cand_tokens)
        r_count += len(gold_tokens)
    bp = 1
    if c_count < r_count:
        bp = math.exp(1 - r_count / c_count)
    return bp


def calc_corpus_bleu(cands, golds):
    bp = _calc_bp(cands, golds)
    cover_rate1 = _calc_cover_rate(cands, golds, 1)
    cover_rate2 = _calc_cover_rate(cands, golds, 2)
    cover_rate3 = _calc_cover_rate(cands, golds, 3)
    bleu1 = 0
    bleu2 = 0
    bleu3 = 0
    if cover_rate1 > 0:
        bleu1 = bp * math.exp(math.log(cover_rate1))
    if cover_rate2 > 0:
        bleu2 = bp * math.exp((math.log(cover_rate1) + math.log(cover_rate2)) / 2)
    if cover_rate3 > 0:
        bleu3 = bp * math.exp((math.log(cover_rate1) + math.log(cover_rate2) + math.log(cover_rate3)) / 3)
    return bleu1, bleu2, bleu3


# def calc_corpus_bleu_new(cands, golds):
#     golds = [[gold] for gold in golds]
#     sf = SmoothingFunction().method7
#     bleu1 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[1, 0, 0, 0])
#     bleu2 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[0.5, 0.5, 0, 0])
#     bleu3 = corpus_bleu(golds, cands, smoothing_function=sf, weights=[0.34, 0.33, 0.33, 0])
#     return bleu1, bleu2, bleu3


def calc_sentence_bleu(cands, golds):
    bleu1 = []
    bleu2 = []
    bleu3 = []
    sf = SmoothingFunction().method7
    for hyp, ref in zip(cands, golds):
        try:
            b1 = sentence_bleu([ref], hyp, smoothing_function=sf, weights=[1, 0, 0, 0])
        except ZeroDivisionError:
            b1 = 0.0
        try:
            b2 = sentence_bleu([ref], hyp, smoothing_function=sf, weights=[0.5, 0.5, 0, 0])
        except ZeroDivisionError:
            b2 = 0.0
        try:
            b3 = sentence_bleu([ref], hyp, smoothing_function=sf, weights=[0.34, 0.33, 0.33, 0])
        except ZeroDivisionError:
            b3 = 0.0
        bleu1.append(b1)
        bleu2.append(b2)
        bleu3.append(b3)
    return mean(bleu1), mean(bleu2), mean(bleu3)


def calc_corpus_bleu_new(hypothesis, references):
    # hypothesis = [normalize_answer(hyp).split(" ") for hyp in hypothesis]
    # references = [[normalize_answer(ref).split(" ")] for ref in references]
    references = [[gold] for gold in references]
    sf = SmoothingFunction(epsilon=1e-12).method1
    b1 = corpus_bleu(references, hypothesis, weights=(1.0/1.0,), smoothing_function=sf)
    b2 = corpus_bleu(references, hypothesis, weights=(1.0/2.0, 1.0/2.0), smoothing_function=sf)
    b3 = corpus_bleu(references, hypothesis, weights=(1.0/3.0, 1.0/3.0, 1.0/3.0), smoothing_function=sf)
    b4 = corpus_bleu(references, hypothesis, weights=(1.0/4.0, 1.0/4.0, 1.0/4.0, 1.0/4.0), smoothing_function=sf)
    return b1, b2, b3, b4


def _calc_distinct_ngram(cands, ngram):
    ngram_total = 0.00001
    ngram_distinct_count = 0.00001
    pred_dict = defaultdict(int)
    for cand_tokens in cands:
        _calc_ngram_dict(cand_tokens, ngram, pred_dict)
    for key, freq in pred_dict.items():
        ngram_total += freq
        ngram_distinct_count += 1
    return ngram_distinct_count / ngram_total


def _calc_sent_distinct_ngram(cand, ngram):
    ngram_total = 0.0000000001
    ngram_distinct_count = 0.0
    ngram_dict = defaultdict(int)
    for i in range(0, len(cand) - ngram + 1):
        item = tuple(cand[i:i + ngram])
        ngram_dict[item] += 1
    for _, freq in ngram_dict.items():
        ngram_total += freq
        ngram_distinct_count += 1
    return ngram_distinct_count / ngram_total


def calc_corpus_distinct(cands):
    distinct1 = _calc_distinct_ngram(cands, 1)
    distinct2 = _calc_distinct_ngram(cands, 2)
    return distinct1, distinct2


def calc_sentence_distinct(cands):
    distinct1 = mean([_calc_sent_distinct_ngram(c, 1) for c in cands])
    distinct2 = mean([_calc_sent_distinct_ngram(c, 2) for c in cands])
    return distinct1, distinct2


def calc_corpus_f1(cands, golds):
    golden_word_total = 0.00000001
    pred_word_total = 0.00000001
    hit_word_total = 0.00000001
    for response, golden_response in zip(cands, golds):
        common = Counter(response) & Counter(golden_response)
        hit_word_total += sum(common.values())
        golden_word_total += len(golden_response)
        pred_word_total += len(response)
    p = hit_word_total / pred_word_total
    r = hit_word_total / golden_word_total
    f1 = 2 * p * r / (p + r)
    return f1


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    re_art = re.compile(r'\b(a|an|the)\b')
    re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')

    def remove_articles(text):
        return re_art.sub(' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        return re_punc.sub(' ', text)  # convert punctuation to spaces

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s)))).split(' ')


def calc_rouge(cands, golds):
    # NOTE: requires the `language_evaluation` package; its import is commented
    # out at the top of this file, so uncomment it before calling calc_rouge().
    rouge_evaluator = language_evaluation.RougeEvaluator(num_parallel_calls=1, tokenization_fn=normalize_answer)
    predictions = [' '.join(c) for c in cands]
    answers = [' '.join(g) for g in golds]
    rouge_result = rouge_evaluator.run_evaluation(predictions, answers)
    return rouge_result


def dialogue_evaluation(ori_cands, ori_golds):
    assert len(ori_cands) == len(ori_golds), f"num cand: {len(ori_cands)}, num gold: {len(ori_golds)}"
    cands = []
    golds = []
    help_tokenize = lambda x: word_tokenize(x.lower())
    for cand, gold in zip(ori_cands, ori_golds):
        cands.append(help_tokenize(cand.lower()))
        golds.append(help_tokenize(gold.lower()))
    cbleu1, cbleu2, cbleu3, cbleu4 = calc_corpus_bleu_new(cands, golds)
    sbleu1, sbleu2, sbleu3 = calc_sentence_bleu(cands, golds)
    cdiv1, cdiv2 = calc_corpus_distinct(cands)
    sdiv1, sdiv2 = calc_sentence_distinct(cands)
    cf1 = calc_corpus_f1(cands, golds)
    # rouge_result = calc_rouge(cands, golds)
    result = {
        'cf1': cf1,
        'bleu1': cbleu1,
        'bleu2': cbleu2,
        'bleu3': cbleu3,
        'bleu4': cbleu4,
        'dist1': cdiv1,
        'dist2': cdiv2,
    }
    # result.update(rouge_result)
    result = {k: round(100 * v, 6) for k, v in result.items()}
    return result


def file_dialogue_evaluation(cand_file, gold_file):
    print(f"cand file: {cand_file}, gold file: {gold_file}")
    cands = []
    golds = []
    with open(cand_file, 'r', encoding='utf-8') as f:
        for line in f:
            cands.append(line.strip())
    with open(gold_file, 'r', encoding='utf-8') as f:
        for line in f:
            golds.append(line.strip())
    results = dialogue_evaluation(cands, golds)
    print(results)


if __name__ == "__main__":
    cand_file = sys.argv[1]
    gold_file = sys.argv[2]
    file_dialogue_evaluation(cand_file, gold_file)
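
# --- Usage sketch (not part of the original file) ---
# A direct call to dialogue_evaluation() on made-up sample sentences; NLTK's
# `punkt` tokenizer models must be available (nltk.download('punkt')) for
# word_tokenize to run. All metric values come back scaled to 0-100.
def _example():
    cands = ["i love playing the guitar", "my favourite food is pizza"]
    golds = ["i enjoy playing the guitar", "pizza is my favourite food"]
    print(dialogue_evaluation(cands, golds))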
# ---- cameraToWorld.py | blguweb/Tap-Tap-computer | Python | MIT ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from typing import NoReturn
import cv2 as cv
import numpy as np
from numpy import mat
import xml.etree.ElementTree as ET
import math

camera_angle = 315

camera_intrinsic = {
    # camera intrinsic matrix (obtained with MATLAB)
    "camera_matrix": [871.086328150675740, 0.0, 314.319098669115306,
                      0.0, 868.410697770935144, 254.110678266434348,
                      0.0, 0.0, 1.0],
    # distortion coefficients
    "camera_distortion": [0.182040359674805, -0.564946010535902, 0.001566542339394,
                          0.003396709692351, 0.000000000000000],
    # rotation vector
    "camera_rvec": [-1.57079633, 0.0, 0.0],
    # translation vector
    # "camera_tvec": ['-29.046143504451425', '1126.526303382564', '736.155158603123']
    "camera_tvec": [0.0, 0.0, 0.0],
    # # rotation matrix
    # "rvec_matrix": [[1.0, 0.0, 0.0],
    #                 [0.0, 0.0, -1.0],
    #                 [0.0, 1.0, 0.0]]
}


class CtoWorld(object):
    def __init__(self):
        self.image_size = (640, 480)
        self.rvec = np.asarray(camera_intrinsic['camera_rvec'])
        self.cam_mat = np.asarray(camera_intrinsic['camera_matrix'])
        self.tvec = np.asarray(camera_intrinsic['camera_tvec'])
        self.cam_dist = np.asarray(camera_intrinsic['camera_distortion'])
        self.rot_mat = mat(cv.Rodrigues(self.rvec)[0])
        # self.cam_mat_new, roi = cv.getOptimalNewCameraMatrix(self.cam_mat, self.cam_dist, self.image_size, 1, self.image_size)
        # self.roi = np.array(roi)

    def pixel_c(self, points, depth):
        # pixel -> camera
        p = (depth * np.asarray(points)).T
        # np.float was removed in NumPy 1.24; plain float is equivalent
        p = mat(p, float).reshape((3, 1))
        self.cam_mat = mat(self.cam_mat, float).reshape((3, 3))
        ca_points = np.dot(np.linalg.inv(self.cam_mat), p)
        print("c", ca_points)
        return ca_points

    def c_w(self, points):
        # camera -> world
        revc = mat(self.rot_mat, float).reshape((3, 3))
        T = mat(self.tvec, float).reshape((3, 1))
        w_points = np.dot(revc, points) + T
        print("w", w_points)
        return w_points

    def imu_get(self, message):
        mess = message.split()
        z = float(mess[0])
        x = float(mess[1])
        y = float(mess[2])
        print("3", x, y, z)
        return x, y, z

    def unit_vector_get(self, vx, vy, vz):
        # angle between the camera and due north
        c_to_n = camera_angle
        # compute the angles (west, hence negative)
        # xita: clockwise about -y is positive, counter-clockwise negative: c_to_n - (-vz)
        xita = c_to_n + vz
        fai = vx + 90
        print("fai", fai, xita)
        # direction unit vector
        uz = math.cos(math.radians(fai))
        print("uz", uz)
        ux = - math.sin(math.radians(xita)) * math.sin(math.radians(fai))
        uy = - math.cos(math.radians(xita)) * math.sin(math.radians(fai))
        vec = [ux, uy, uz]
        print("vtype", vec)
        return vec

    def target_not(self, unot, uvector):
        # need to know which plane the ray hits, e.g. the y plane
        tx = uvector[0] * (-unot[1]) / uvector[1] + unot[0]
        tz = uvector[2] * (-unot[1]) / uvector[1] + unot[2]
        return tx, tz


if __name__ == '__main__':
    mctoworld = CtoWorld()  # create the correction object
    # pixel coordinates x, y and depth
    points = [355, 218, 1]
    depth = 1540
    # camera coordinates
    camera_points = mctoworld.pixel_c(points, depth)
    w_points = mctoworld.c_w(camera_points)
    # IMU
    mes = "-42.60 6.91 0.67"
    x, y, z = mctoworld.imu_get(mes)
    mvector = mctoworld.unit_vector_get(x, y, z)
    tx, tz = mctoworld.target_not(w_points, mvector)
    print("tx: ", tx)
    print("tz: ", tz)
    if -2000 < tx < -1380 and 840 < tz < 1300:
        print("true")
    else:
        print("false")
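
# --- Note on the back-projection used above (not part of the original file) ---
# pixel_c() implements the inverse pinhole model: for a pixel p = (u, v, 1) at
# depth d, the camera-frame point is X_c = K^-1 (d * p), so K @ X_c == d * p
# holds exactly. A tiny self-check using the same K and the sample pixel from
# the demo block above:
def _backprojection_check():
    K = np.asarray(camera_intrinsic['camera_matrix']).reshape(3, 3)
    p = np.array([355.0, 218.0, 1.0])  # sample pixel (u, v, 1)
    X_c = np.linalg.inv(K) @ (1540.0 * p)
    assert np.allclose(K @ X_c, 1540.0 * p)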
# ---- compilers/labs/lab2/gui.py | vampy/university | Python | MIT ----
#!/usr/bin/python

import os

from log import Log
from enum import IntEnum, unique
from grammar import Grammar
from automaton import FiniteAutomaton


@unique
class Command(IntEnum):
    GRAMMAR_READ = 1
    GRAMMAR_DISPLAY = 2
    GRAMMAR_VERIFY = 3
    AUTOMATON_READ = 4
    AUTOMATON_DISPLAY = 5
    CONVERT_RG_TO_FA = 6
    CONVERT_FA_TO_RG = 7
    HELP = 99
    QUIT = 0


class Gui:
    @staticmethod
    def run():
        Log.info('Running...')

        try:
            grammar = Grammar.from_lines(Gui.get_lines_filename('grammar.rg'))
            print(grammar, grammar.is_regular(), grammar.is_left, grammar.is_right, end='\n' * 2)
            print(grammar.to_finite_automaton(), end='\n' * 2)
        except Exception as e:
            # NOTE: if from_lines() itself raises, `grammar` is unbound here and
            # this first Log.error call raises a NameError of its own
            Log.error(grammar.error_message)
            Log.error(str(e))

        try:
            automaton = FiniteAutomaton.from_lines(Gui.get_lines_filename('automata.fa'))
            print(automaton, end='\n' * 2)
            print(automaton.to_regular_grammar())
        except Exception as e:
            Log.error(str(e))

        Gui.print_help_menu()
        grammar, automaton = None, None

        while True:
            try:
                command = Command(Gui.get_int('>>> '))

                if command is Command.QUIT:
                    print('\n\nQuitting...')
                    break
                elif command is Command.HELP:
                    Gui.print_help_menu()
                elif command is Command.GRAMMAR_READ:
                    filename = Gui.get_string('Filename = ')
                    grammar = Grammar.from_lines(Gui.get_lines_filename(filename))
                    Log.success('Success')
                elif command is Command.GRAMMAR_DISPLAY or command is Command.CONVERT_RG_TO_FA:
                    if grammar is None:
                        raise Exception('Please read a RG')
                    if command is Command.GRAMMAR_DISPLAY:
                        print(grammar)
                    else:
                        print(grammar.to_finite_automaton())
                    Log.success('Success')
                elif command is Command.GRAMMAR_VERIFY:
                    if grammar is None:
                        raise Exception('Please read a RG')
                    is_regular = grammar.is_regular()
                    if is_regular:
                        Log.success('Grammar is {0} regular'.format('left' if grammar.is_left else 'right'))
                    else:
                        Log.error('Grammar is NOT regular')
                elif command is Command.AUTOMATON_READ:
                    filename = Gui.get_string('Filename = ')
                    automaton = FiniteAutomaton.from_lines(Gui.get_lines_filename(filename))
                    Log.success('Success')
                elif command is Command.AUTOMATON_DISPLAY or command is Command.CONVERT_FA_TO_RG:
                    if automaton is None:
                        raise Exception('Please read a FA')
                    if command is Command.AUTOMATON_DISPLAY:
                        print(automaton)
                    else:
                        print(automaton.to_regular_grammar())
                    Log.success('Success')
                else:
                    print(command)
            except Exception as e:
                Log.error(str(e))

    @staticmethod
    def get_lines_filename(filename):
        if not os.path.exists(filename):
            # the original raised FileExistsError, which means the opposite condition
            raise FileNotFoundError('The file "{0}" does not exist'.format(filename))

        with open(filename, 'r') as f:
            lines = f.readlines()
        return lines

    @staticmethod
    def print_help_menu():
        print('{0}. Read grammar'.format(Command.GRAMMAR_READ))
        print('{0}. Display grammar'.format(Command.GRAMMAR_DISPLAY))
        print('{0}. Verify grammar'.format(Command.GRAMMAR_VERIFY), end='\n' * 2)
        print('{0}. Read FA'.format(Command.AUTOMATON_READ))
        print('{0}. Display FA'.format(Command.AUTOMATON_DISPLAY), end='\n' * 2)
        print('{0}. Convert RG to FA'.format(Command.CONVERT_RG_TO_FA))
        print('{0}. Convert FA to RG'.format(Command.CONVERT_FA_TO_RG), end='\n' * 2)
        print('{0}. Help menu'.format(Command.HELP))
        print('{0}. Quit'.format(Command.QUIT), end='\n' * 2)

    @staticmethod
    def get_int(prompt, prompt_retry='Retry again..', is_retry=False):
        if is_retry:
            print(prompt_retry)

        try:
            return int(Gui.get_string(prompt))
        except ValueError:
            return Gui.get_int(prompt, prompt_retry, True)

    @staticmethod
    def get_string(prompt):
        try:
            # Do not allow empty input
            user_input = input(prompt)
            if not user_input:
                return Gui.get_string(prompt)
            return user_input
        except EOFError:  # Ctrl-D
            return Command.QUIT
        except KeyboardInterrupt:  # Ctrl-C
            return Command.QUIT
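
# --- Usage sketch (not part of the original file) ---
# The menu loop above relies on IntEnum round-tripping: Command(n) maps the
# integer a user types back to its member and raises ValueError for unknown
# codes, which the loop's except-block then reports.
if __name__ == '__main__':
    assert Command(1) is Command.GRAMMAR_READ
    assert Command(0) is Command.QUIT
    try:
        Command(42)  # no such menu entry
    except ValueError as err:
        print(err)   # "42 is not a valid Command"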
# ---- ml-scripts/ss_calib/scripts/ss_charge_cali.py | YashengFu/exo-200_scripts | Python | MIT ----
import numpy as np
import time
import argparse
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from scipy import special
from tqdm import tqdm
from scipy.optimize import curve_fit
from utils.build_hist import build_hist


class SS_Charge:
    """Read calibration data and use SS events to get calibration constants"""

    def __init__(self, file_path, input_files):
        self.file_path = file_path
        self.input_files = input_files
        self.df_data = self.get_data(input_files)
        self.cluster_energy = []

    def get_data(self, input_files):
        df_all = []
        file_index = 0
        for index in range(len(input_files)):
            df = pd.read_hdf(self.file_path + input_files[index])
            df = df.reset_index()
            df['entry'] = df['entry'] + file_index
            df = df.set_index(["entry"])
            file_index += len(df['label'])
            df_all.append(df)
        df_total = pd.concat(df_all)
        return df_total

    def select_ss_data(self, ss_type=1):
        c_energy = []
        # NOTE: the filter is hard-coded to 1; the ss_type argument is only used in the printout
        select_data = self.df_data[self.df_data['ss_type'] == 1]
        print('%s events are %d' % (ss_type, select_data.shape[0]))
        for index in tqdm(set(select_data.index.get_level_values('entry').values), mininterval=1, leave=False):
            variables = [float(i) for i in select_data["report"][index].split()]
            c_energy.append(variables[-1])
        self.cluster_energy = c_energy

    def check_data(self):
        hist_data, bin_edges, patches = plt.hist(
            self.cluster_energy, bins=np.arange(0, 3001, 6), label='cluster_energy',
            histtype='step', alpha=0.9, linewidth=1, edgecolor='blue', density=False
        )
        bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
        bin_centers = np.array(bin_centers)
        return bin_centers, hist_data

    def root_fit(self):
        c_hist = build_hist(self.cluster_energy)
        return c_hist


if __name__ == "__main__":
    start_time = time.time()
    test_object = SS_Charge("/dybfs2/nEXO/fuys/EXO-200/shape_agreement/2019_0vbb/Phase1/fv_162_10_182_173_3d0.6/data/ml_rec_data/", ["run_6255_ml.h5"])
    test_object.select_ss_data(1)
    bin_centers, hist_data = test_object.check_data()
    # NOTE: fit_data() is not defined anywhere in this file, so the original
    # call below would raise AttributeError at runtime:
    # bin_centers, hist_data, bin_centers_mask, c_energy_mask, popt, perr = test_object.fit_data()
    print(f"time costs: {(time.time() - start_time)/60} min")
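
# --- Usage sketch (not part of the original file) ---
# check_data() reduces the histogram to bin centers: center_i = (edge_i + edge_{i+1}) / 2.
# A standalone illustration with the same 6-unit-wide binning and made-up energies:
def _bin_center_example():
    rng = np.random.default_rng(0)
    energies = rng.uniform(0, 3000, size=1000)  # hypothetical cluster energies
    hist_data, bin_edges = np.histogram(energies, bins=np.arange(0, 3001, 6))
    bin_centers = 0.5 * (bin_edges[1:] + bin_edges[:-1])
    return bin_centers, hist_data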
# ---- ExoplanetPocketknife.py | ScottHull/Exoplanet-Pocketknife | Python | CC0-1.0 ----
# python /usr/bin/env/python # /// The Exoplanet Pocketknife # /// Scott D. Hull, The Ohio State University 2015-2017 # /// All usage must include proper citation and a link to the Github repository # /// https://github.com/ScottHull/Exoplanet-Pocketknife import os, csv, time, sys, shutil, subprocess from threading import Timer from math import * import pandas as pd import matplotlib.pyplot as plt from scipy import integrate as inte import numpy as np import bisect bsp_run = False morb_run = False gravity = 9.8 # plate_thickness = 10.0 # This is in km! plate_thickness = 10 * 1000 # This is in m! na_atwt = 22.98976928 mg_atwt = 24.305 al_atwt = 26.9815386 si_atwt = 28.0855 ca_atwt = 40.078 ti_atwt = 47.867 cr_atwt = 51.9961 fe_atwt = 55.845 ni_atwt = 58.6934 na2o_molwt = 61.9785 mgo_molwt = 40.3040 al2o3_molwt = 101.9601 sio2_molwt = 60.0835 cao_molwt = 56.0770 tio2_molwt = 79.8650 cr2o3_molwt = 151.9892 feo_molwt = 71.8440 nio_molwt = 74.6924 fe2o3_molwt = 159.687 num_na2o_cations = 2 num_mgo_cations = 1 num_al2o3_cations = 2 num_sio2_cations = 1 num_cao_cations = 1 num_tio2_cations = 1 num_cr2o3_cations = 2 num_feo_cations = 1 num_nio_cations = 1 num_fe2o3_cations = 2 asplund_na = 1479108.388 asplund_mg = 33884415.61 asplund_al = 2344228.815 asplund_si = 32359365.69 asplund_ca = 2041737.945 asplund_ti = 79432.82347 asplund_cr = 436515.8322 asplund_fe = 28183829.31 asplund_ni = 1698243.652 asplund_sivsfe = asplund_si / asplund_fe asplund_navsfe = asplund_na / asplund_fe mcd_earth_fe = 29.6738223341739 mcd_earth_na = 0.40545783900173 mcd_earth_mg = 32.812015232308 mcd_earth_al = 3.05167459380979 mcd_earth_si = 29.6859892035662 mcd_earth_ca = 2.20951970229211 mcd_earth_ni = 1.60579436264263 mcd_earth_ti = 0.0876307681103416 mcd_earth_cr = 0.468095964095391 mc_earth_ni = 1.60579436264263 mcd_sivsfe = mcd_earth_si / mcd_earth_fe mcd_navsfe = mcd_earth_na / mcd_earth_fe adjust_si = mcd_sivsfe / asplund_sivsfe adjust_na = mcd_navsfe / asplund_navsfe modelearth_mgo = 11.84409812845 gale_mgo = 7.65154964069009 mgo_fix = gale_mgo / modelearth_mgo depth_trans_zone = [0, 6, 19.7, 28.9, 36.4, 43.88, 51.34, 58.81, 66.36, 73.94, 81.5, 88.97, 96.45, 103.93, 111.41, 118.92, 126.47, 134.01, 141.55, 149.09, 156.64, 164.18, 171.72, 179.27, 186.79, 194.27, 201.75, 209.23, 216.71, 224.09, 231.4, 238.7, 246.01, 253.31, 260.62, 267.9, 275.16, 282.42, 289.68, 296.94, 304.19, 311.41, 318.44, 325.47, 332.5, 339.53, 346.56, 353.59, 360.62, 367.66, 374.69, 381.72, 388.75, 395.78, 402.78, 409.72, 416.67, 423.61, 430.56, 437.5, 444.44, 451.32, 457.89, 464.47, 471.05, 477.63, 484.21, 490.79, 497.37, 503.75, 510, 516.25, 522.5, 528.75, 535, 541.25, 547.5, 553.95, 560.53, 567.11, 573.68] inputfile_list = [] home_dir = [] # star_names = [] # na_h = [] # mg_h = [] # al_h = [] # si_h = [] # ca_h = [] # ti_h = [] # cr_h = [] # fe_h = [] # # star_index = [] # na_index = [] # mg_index = [] # al_index = [] # si_index = [] # ca_index = [] # ti_index = [] # cr_index = [] # fe_index = [] # # na_mol_abundances = [] # mg_mol_abundances = [] # al_mol_abundances = [] # si_mol_abundances = [] # ca_mol_abundances = [] # ti_mol_abundances = [] # cr_mol_abundances = [] # fe_mol_abundances = [] def adjustsi_fct(si_pct): adj_si_pct = si_pct * adjust_si return adj_si_pct def adjustna_fct(na_pct): adj_na_pct = na_pct * adjust_na return adj_na_pct def createbspenvfile(): if "BSP_Env_File" in os.listdir(os.getcwd()): pass else: bspenvfile = open("BSP_Env_File", 'w') one = "!BSP_Environment_File" two = "ALPHAMELTS_VERSION MELTS" three = 
"ALPHAMELTS_MODE isobaric" four = "ALPHAMELTS_MAXT 3000" five = "ALPHAMELTS_DELTAT -2" six = "ALPHAMELTS_MINT 1020" seven = "ALPHAMELTS_FRACTIONATE_SOLIDS true" eight = "ALPHAMELTS_CELSIUS_OUTPUT true" nine = "ALPHAMELTS_SAVE_ALL true" ten = "ALPHAMELTS_SKIP_FAILURE true" eleven = "Suppress: alloy-liquid" bspenvfile.write("{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(one, two, three, four, five, six, seven, eight, nine, ten, eleven)) bspenvfile.close() def createmorbenvfile(): if "MORB_Env_File" in os.listdir(os.getcwd()): pass else: morbenvfile = open("MORB_Env_File", 'w') one = "!MORB_Environment_File" two = "ALPHAMELTS_VERSION pMELTS" three = "ALPHAMELTS_MODE isobaric" four = "ALPHAMELTS_MAXT 3000" five = "ALPHAMELTS_DELTAT -2" six = "ALPHAMELTS_MINT 1000" seven = "ALPHAMELTS_FRACTIONATE_SOLIDS true" eight = "ALPHAMELTS_CELSIUS_OUTPUT true" nine = "ALPHAMELTS_SAVE_ALL true" ten = "ALPHAMELTS_SKIP_FAILURE true" eleven = "Suppress: alloy-liquid" morbenvfile.write("{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(one, two, three, four, five, six, seven, eight, nine, ten, eleven)) morbenvfile.close() def runmelts_bsp(infile_directory, inputfilename): print("\n[~] Preparing alphaMELTS for BSP calculations...") if "{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4]) in os.listdir(os.getcwd()): shutil.rmtree("{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4])) os.mkdir("{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4])) else: os.mkdir("{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4])) bsp_outdir = (home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(inputfilename[:-4])) for i in os.listdir(infile_directory): os.chdir(home_dir[0]) if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()): os.remove("alphaMELTS_tbl.txt") else: pass shutil.copy((infile_directory + "/" + str(i)), (home_dir[0] + "/" + str(i))) print("[~] Running BSP calculations for: {}".format(i[:-20])) p = subprocess.Popen(["run_alphamelts.command", "-f", "BSP_Env_File"], stdin=subprocess.PIPE) t = Timer(300, p.kill) t.start() print("\nTimeout timer started. 
300 seconds until the loop continues...\n") p.communicate(input=b"\n".join([b"1", i, b"8", b"alloy-liquid", b"0", b"x", b"5", b"4", b"-1.4", b"2", b"2500", b"4200", b"4", b"1", b"0"])) t.cancel() if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()): oldname = "alphaMELTS_tbl.txt" newname = i[:-20] + "_BSP_OUTPUT" os.rename(oldname, newname) shutil.move(newname, bsp_outdir + "/{}".format(newname)) os.remove(i) os.chdir(bsp_outdir) csv_file_name = newname + ".csv" with open(newname, 'r') as infile, open(csv_file_name, 'w') as outfile: in_txt = csv.reader(infile, delimiter=" ") out_csv = csv.writer(outfile) out_csv.writerows(in_txt) infile.close() outfile.close() os.remove(newname) print("[~] {} BSP calculation processed!".format(i[:-20])) else: print("\n[X] {} BSP calculation FAILED!".format(i[:-20])) pass if i in home_dir[0]: os.remove(home_dir[0] + "/{}".format(i)) else: pass print("[~] Scraping BSP files for alloy abundances...") return ("{}_Completed_BSP_MELTS_Files".format(inputfilename)) def file_consolidate(path, init_path): os.chdir(path) if "EP_Consolidated_Output.csv" is os.listdir(os.getcwd()): os.remove("EP_Consolidated_Output.csv") else: pass if "EP_Consolidated_Output.csv" is os.listdir(init_path): os.remove(init_path + "/EP_Consolidated_Output.csv") else: pass outfile = open("EP_Consolidated_Output.csv", 'a') for i in os.listdir(os.getcwd()): if i != "EP_Consolidated_Output.csv": with open(i, 'r') as infile: reader = csv.reader(infile, delimiter=",") read_row = [] for row in reader: for p in row: read_row.append(p) writethis = ",".join(str(z) for z in read_row) outfile.write("{}\n".format(writethis)) os.remove(i) now_dir = os.getcwd() + "/{}".format("EP_Consolidated_Output.csv") now_dir2 = os.getcwd() to_dir = init_path + "/{}".format("EP_Consolidated_Output.csv") shutil.move(now_dir, to_dir) os.chdir(init_path) shutil.rmtree(now_dir2) print("[~] Consolidated file '{}' has been written!\n(Please see '{}' for your " "file!)\n".format("EP_Consolidated_Output.csv", init_path)) def logep(infile, infile_type, consol_file, init_path, library): if "{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type) in os.listdir(os.getcwd()): shutil.rmtree("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type)) os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type)) else: os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type)) if "{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type) in os.listdir(os.getcwd()): os.remove("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type)) else: pass chem_outfile = open("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type), 'a') chem_outfile.write("Star,FeO,CaO,Al2O3,Na2O,MgO,SiO2,TiO2,Cr2O3,NiO,Mass_Alloy\n") # try: with open(infile, 'r') as inputfile: if library is True: print("\n[~] Writing MELTS {} Input Files...".format(infile_type)) else: print("[~] Preparing consolidated MELTS output file...") df = pd.DataFrame(pd.read_csv(inputfile)) for index, row in df.iterrows(): star_name = row['Star'] # print(star_name) # print(row['[Fe/H]']) # print(row['[Ca/H]']) # print(row['[Al/H]']) # print(row['[Na/H]']) # print(row['[Mg/H]']) # print(row['[Si/H]']) # print(row['[Ti/H]']) # print(row['[Cr/H]']) # print(row['[Ni/H']) fe_abundance = (10 ** (row['[Fe/H]'])) * asplund_fe ca_abundance = (10 ** (row['[Ca/H]'])) * asplund_ca al_abundance = (10 ** (row['[Al/H]'])) * asplund_al na_abundance = (10 ** (row['[Na/H]'])) * asplund_na mg_abundance = (10 ** 
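# ---------------------------------------------------------------------------
# Illustrative sketch (never called by the pipeline): the kill-on-timeout
# pattern wrapped around every alphaMELTS and HeFESTo invocation above. The
# 'sleep' command stands in for the external binary; everything here is a
# hypothetical example, not part of the original pipeline.
def _demo_run_with_timeout(seconds=5):
    p = subprocess.Popen(["sleep", "60"])  # stand-in for a long-running process
    t = Timer(seconds, p.kill)             # p.kill() fires if we outlast 'seconds'
    t.start()
    p.communicate()                        # blocks until the process exits or is killed
    t.cancel()                             # disarm the timer on a normal exit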
def file_consolidate(path, init_path):
    os.chdir(path)
    if "EP_Consolidated_Output.csv" in os.listdir(os.getcwd()):  # (the original used 'is' here, a bug)
        os.remove("EP_Consolidated_Output.csv")
    else:
        pass
    if "EP_Consolidated_Output.csv" in os.listdir(init_path):  # (the original used 'is' here, a bug)
        os.remove(init_path + "/EP_Consolidated_Output.csv")
    else:
        pass
    outfile = open("EP_Consolidated_Output.csv", 'a')
    for i in os.listdir(os.getcwd()):
        if i != "EP_Consolidated_Output.csv":
            with open(i, 'r') as infile:
                reader = csv.reader(infile, delimiter=",")
                read_row = []
                for row in reader:
                    for p in row:
                        read_row.append(p)
                writethis = ",".join(str(z) for z in read_row)
                outfile.write("{}\n".format(writethis))
            os.remove(i)
    outfile.close()  # note: absent in the original; the file should be closed before it is moved
    now_dir = os.getcwd() + "/{}".format("EP_Consolidated_Output.csv")
    now_dir2 = os.getcwd()
    to_dir = init_path + "/{}".format("EP_Consolidated_Output.csv")
    shutil.move(now_dir, to_dir)
    os.chdir(init_path)
    shutil.rmtree(now_dir2)
    print("[~] Consolidated file '{}' has been written!\n(Please see '{}' for your "
          "file!)\n".format("EP_Consolidated_Output.csv", init_path))


def logep(infile, infile_type, consol_file, init_path, library):
    if "{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type) in os.listdir(os.getcwd()):
        shutil.rmtree("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
        os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
    else:
        os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
    if "{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type) in os.listdir(os.getcwd()):
        os.remove("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
    else:
        pass
    chem_outfile = open("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type), 'a')
    chem_outfile.write("Star,FeO,CaO,Al2O3,Na2O,MgO,SiO2,TiO2,Cr2O3,NiO,Mass_Alloy\n")
    # try:
    with open(infile, 'r') as inputfile:
        if library is True:
            print("\n[~] Writing MELTS {} Input Files...".format(infile_type))
        else:
            print("[~] Preparing consolidated MELTS output file...")
        df = pd.DataFrame(pd.read_csv(inputfile))
        for index, row in df.iterrows():
            star_name = row['Star']
            # print(star_name)
            # print(row['[Fe/H]'])
            # print(row['[Ca/H]'])
            # print(row['[Al/H]'])
            # print(row['[Na/H]'])
            # print(row['[Mg/H]'])
            # print(row['[Si/H]'])
            # print(row['[Ti/H]'])
            # print(row['[Cr/H]'])
            # print(row['[Ni/H]'])
            fe_abundance = (10 ** (row['[Fe/H]'])) * asplund_fe
            ca_abundance = (10 ** (row['[Ca/H]'])) * asplund_ca
            al_abundance = (10 ** (row['[Al/H]'])) * asplund_al
            na_abundance = (10 ** (row['[Na/H]'])) * asplund_na
            mg_abundance = (10 ** (row['[Mg/H]'])) * asplund_mg
            si_abundance = (10 ** (row['[Si/H]'])) * asplund_si
            ti_abundance = (10 ** (row['[Ti/H]'])) * asplund_ti
            cr_abundance = (10 ** (row['[Cr/H]'])) * asplund_cr
            ni_abundance = (10 ** (row['[Ni/H]'])) * asplund_ni
            total_abundances = (fe_abundance + ca_abundance + al_abundance + na_abundance + mg_abundance +
                                si_abundance + ti_abundance + cr_abundance + ni_abundance)
            # print(total_abundances)
            init_pct_fe = fe_abundance / total_abundances
            init_pct_ca = ca_abundance / total_abundances
            init_pct_al = al_abundance / total_abundances
            init_pct_na = na_abundance / total_abundances
            init_pct_mg = mg_abundance / total_abundances
            init_pct_si = si_abundance / total_abundances
            init_pct_ti = ti_abundance / total_abundances
            init_pct_cr = cr_abundance / total_abundances
            init_pct_ni = ni_abundance / total_abundances
            init_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + init_pct_na + init_pct_mg + init_pct_si +
                            init_pct_ti + init_pct_cr + init_pct_ni)
            # print(star_name)
            # print(init_pct_fe, init_pct_ca, init_pct_al, init_pct_na, init_pct_mg, init_pct_si,
            #       init_pct_ti, init_pct_cr, init_pct_ni, init_pct_sum)
            moles_si_remaining = adjustsi_fct(si_pct=init_pct_si)
            moles_na_remaining = adjustna_fct(na_pct=init_pct_na)
            norm_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + moles_na_remaining + init_pct_mg +
                            moles_si_remaining + init_pct_ti + init_pct_cr + init_pct_ni)
            norm_pct_fe = init_pct_fe / norm_pct_sum
            norm_pct_ca = init_pct_ca / norm_pct_sum
            norm_pct_al = init_pct_al / norm_pct_sum
            norm_pct_na = moles_na_remaining / norm_pct_sum
            norm_pct_mg = init_pct_mg / norm_pct_sum
            norm_pct_si = moles_si_remaining / norm_pct_sum
            norm_pct_ti = init_pct_ti / norm_pct_sum
            norm_pct_cr = init_pct_cr / norm_pct_sum
            norm_pct_ni = init_pct_ni / norm_pct_sum
            check_norm_sum = (norm_pct_fe + norm_pct_ca + norm_pct_al + norm_pct_na + norm_pct_mg + norm_pct_si +
                              norm_pct_ti + norm_pct_cr + norm_pct_ni)
            wt_feo = ((norm_pct_fe * fe_atwt) * feo_molwt) / (num_feo_cations * fe_atwt)
            wt_cao = ((norm_pct_ca * ca_atwt) * cao_molwt) / (num_cao_cations * ca_atwt)
            wt_al2o3 = ((norm_pct_al * al_atwt) * al2o3_molwt) / (num_al2o3_cations * al_atwt)
            wt_na2o = ((norm_pct_na * na_atwt) * na2o_molwt) / (num_na2o_cations * na_atwt)
            wt_mgo = ((norm_pct_mg * mg_atwt) * mgo_molwt) / (num_mgo_cations * mg_atwt)
            wt_sio2 = ((norm_pct_si * si_atwt) * sio2_molwt) / (num_sio2_cations * si_atwt)
            wt_tio2 = ((norm_pct_ti * ti_atwt) * tio2_molwt) / (num_tio2_cations * ti_atwt)
            wt_cr2o3 = ((norm_pct_cr * cr_atwt) * cr2o3_molwt) / (num_cr2o3_cations * cr_atwt)
            wt_nio = ((norm_pct_ni * ni_atwt) * nio_molwt) / (num_nio_cations * ni_atwt)
            sum_oxwts = (wt_feo + wt_cao + wt_al2o3 + wt_na2o + wt_mgo + wt_sio2 + wt_tio2 + wt_cr2o3 + wt_nio)
            norm_wt_feo = (wt_feo / sum_oxwts) * 100.0
            norm_wt_cao = (wt_cao / sum_oxwts) * 100.0
            norm_wt_al2o3 = (wt_al2o3 / sum_oxwts) * 100.0
            norm_wt_na2o = (wt_na2o / sum_oxwts) * 100.0
            norm_wt_mgo = (wt_mgo / sum_oxwts) * 100.0
            norm_wt_sio2 = (wt_sio2 / sum_oxwts) * 100.0
            norm_wt_tio2 = (wt_tio2 / sum_oxwts) * 100.0
            norm_wt_cr2o3 = (wt_cr2o3 / sum_oxwts) * 100.0
            norm_wt_nio = (wt_nio / sum_oxwts) * 100.0
            norm_wt_sum_check = (norm_wt_feo + norm_wt_cao + norm_wt_al2o3 + norm_wt_na2o + norm_wt_mgo +
                                 norm_wt_sio2 + norm_wt_tio2 + norm_wt_cr2o3 + norm_wt_nio)
            # print(star_name)
            # print(norm_wt_feo, norm_wt_cao, norm_wt_al2o3, norm_wt_na2o, norm_wt_mgo, norm_wt_sio2,
            #       norm_wt_tio2, norm_wt_cr2o3, norm_wt_nio, norm_wt_sum_check)
            if (star_name + "_MELTS_{}_INFILE.txt".format(infile_type)) in os.listdir(os.getcwd()):
                os.remove(star_name + "_MELTS_{}_INFILE.txt".format(infile_type))
            else:
                pass
            melts_input_file = open(star_name + "_MELTS_{}_INFILE.txt".format(infile_type), 'w')
            title = "Title: {}".format(star_name)
            initfeo = "Initial Composition: FeO {}".format(norm_wt_feo)
            initcao = "Initial Composition: CaO {}".format(norm_wt_cao)  # (the original wrote "Cao", a typo)
            inital2o3 = "Initial Composition: Al2O3 {}".format(norm_wt_al2o3)
            initna2o = "Initial Composition: Na2O {}".format(norm_wt_na2o)
            initmgo = "Initial Composition: MgO {}".format(norm_wt_mgo)
            initsio2 = "Initial Composition: SiO2 {}".format(norm_wt_sio2)
            inittio2 = "Initial Composition: TiO2 {}".format(norm_wt_tio2)
            initcr2o3 = "Initial Composition: Cr2O3 {}".format(norm_wt_cr2o3)
            initnio = "Initial Composition: NiO {}".format(norm_wt_nio)
            init_temp = 'Initial Temperature: 2000'
            final_temp = "Final Temperature: 800"
            inc_temp = "Increment Temperature: -5"
            init_press = "Initial Pressure: 500"
            final_press = "Final Pressure: 500"
            dpdt = "dp/dt: 0"
            mode = "Mode: Fractionate Solids"
            mode2 = "Mode: Isobaric"
            melts_input_file.write(
                "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(
                    title, initfeo, initcao, inital2o3, initna2o, initmgo, initsio2, inittio2, initcr2o3, initnio,
                    init_temp, final_temp, inc_temp, init_press, final_press, dpdt, mode, mode2))
            melts_input_file.close()
            shutil.move((os.getcwd() + "/" + star_name + "_MELTS_{}_INFILE.txt".format(infile_type)),
                        (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type) +
                         star_name + "_MELTS_{}_INFILE.txt".format(infile_type)))
            chem_outfile.write("{},{},{},{},{},{},{},{},{},{}\n".format(star_name, norm_wt_feo, norm_wt_cao,
                                                                        norm_wt_al2o3, norm_wt_na2o, norm_wt_mgo,
                                                                        norm_wt_sio2, norm_wt_tio2, norm_wt_cr2o3,
                                                                        norm_wt_nio))
    chem_outfile.close()
    if library is True:
        infiledir = (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type))
        print("[~] MELTS {} Input Files Written!".format(infile_type))
        print("[~] MELTS files stored in {}".format(infiledir))
    else:
        pass
    infiledir = (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type))
    print("[~] Launching alphaMELTS for {} Calculations...".format(infile_type))
    runmelts_bsp(infile_directory=infiledir, inputfilename=infile)
    chem_outfile.close()
    if consol_file is True:
        file_consolidate(path=infiledir, init_path=init_path)
    else:
        file_consolidate(path=infiledir, init_path=init_path)
        scrapebsp2(infiledirectory=(home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(infile[:-4])),
                   inputfilename=infile)
        bsprecalc(bspmeltsfilesdir=(home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(infile[:-4])),
                  infilename=infile, alloy_mass_infile="alloy_mass.csv",
                  bsp_chem_infile="{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
    # except:
    #     # raise Exception
    #     print("\nError! There is likely an issue with the formatting of your input file!\n"
    #           "Please refer to the documentation for more information.\n")
    #     time.sleep(8)
    #     initialization()
    #     # sys.exit()


def molepct(infile, infile_type, consol_file, init_path, library):
    if "{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type) in os.listdir(os.getcwd()):
        shutil.rmtree("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
        os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
    else:
        os.mkdir("{}_MELTS_{}_Input_Files".format(inputfile_list[0][:-4], infile_type))
    if "{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type) in os.listdir(os.getcwd()):
        os.remove("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
    else:
        pass
    chem_outfile = open("{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type), 'a')
    chem_outfile.write("Star,FeO,CaO,Al2O3,Na2O,MgO,SiO2,TiO2,Cr2O3,NiO,Mass_Alloy\n")
    # try:
    with open(infile, 'r') as inputfile:
        if library is True:
            print("\n[~] Writing MELTS {} Input Files...".format(infile_type))
        else:
            print("[~] Preparing consolidated MELTS output file...")
        df = pd.DataFrame(pd.read_csv(inputfile))
        for index, row in df.iterrows():
            star_name = row['Star']
            # print("\n\n_________________________________________\n")
            # print(star_name)
            fe_abundance = row['Fe']
            ca_abundance = row['Ca']
            al_abundance = row['Al']
            na_abundance = row['Na']
            mg_abundance = row['Mg']
            si_abundance = row['Si']
            ti_abundance = row['Ti']
            cr_abundance = row['Cr']
            ni_abundance = row['Ni']
            total_abundances = (fe_abundance + ca_abundance + al_abundance + na_abundance + mg_abundance +
                                si_abundance + ti_abundance + cr_abundance + ni_abundance)
            # print("Input abundances:")
            # print(fe_abundance, ca_abundance, al_abundance, na_abundance, mg_abundance, si_abundance,
            #       ti_abundance, cr_abundance, ni_abundance, total_abundances)
            init_pct_fe = fe_abundance / total_abundances
            init_pct_ca = ca_abundance / total_abundances
            init_pct_al = al_abundance / total_abundances
            init_pct_na = na_abundance / total_abundances
            init_pct_mg = mg_abundance / total_abundances
            init_pct_si = si_abundance / total_abundances
            init_pct_ti = ti_abundance / total_abundances
            init_pct_cr = cr_abundance / total_abundances
            init_pct_ni = ni_abundance / total_abundances
            init_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + init_pct_na + init_pct_mg + init_pct_si +
                            init_pct_ti + init_pct_cr + init_pct_ni)
            # print("Init Cation%:")
            # print(init_pct_fe, init_pct_ca, init_pct_al, init_pct_na, init_pct_mg, init_pct_si,
            #       init_pct_ti, init_pct_cr, init_pct_sum)
            moles_si_remaining = adjustsi_fct(si_pct=init_pct_si)
            moles_na_remaining = adjustna_fct(na_pct=init_pct_na)
            # print("Moles Si/Na Remaining:")
            # print(moles_si_remaining, moles_na_remaining)
            norm_pct_sum = (init_pct_fe + init_pct_ca + init_pct_al + moles_na_remaining + init_pct_mg +
                            moles_si_remaining + init_pct_ti + init_pct_cr + init_pct_ni)
            norm_pct_fe = init_pct_fe / norm_pct_sum
            norm_pct_ca = init_pct_ca / norm_pct_sum
            norm_pct_al = init_pct_al / norm_pct_sum
            norm_pct_na = moles_na_remaining / norm_pct_sum
            norm_pct_mg = init_pct_mg / norm_pct_sum
            norm_pct_si = moles_si_remaining / norm_pct_sum
            norm_pct_ti = init_pct_ti / norm_pct_sum
            norm_pct_cr = init_pct_cr / norm_pct_sum
            norm_pct_ni = init_pct_ni / norm_pct_sum
            check_norm_sum = (norm_pct_fe + norm_pct_ca + norm_pct_al + norm_pct_na + norm_pct_mg + norm_pct_si +
                              norm_pct_ti + norm_pct_cr + norm_pct_ni)
            # print("Normalized Cation% After Si/Na Correction:")
            # print(norm_pct_fe, norm_pct_ca, norm_pct_al, norm_pct_na, norm_pct_mg, norm_pct_si, norm_pct_ti,
            #       norm_pct_cr, norm_pct_ni, norm_pct_sum)
            wt_feo = ((norm_pct_fe * fe_atwt) * feo_molwt) / (num_feo_cations * fe_atwt)
            wt_cao = ((norm_pct_ca * ca_atwt) * cao_molwt) / (num_cao_cations * ca_atwt)
            wt_al2o3 = ((norm_pct_al * al_atwt) * al2o3_molwt) / (num_al2o3_cations * al_atwt)
            wt_na2o = ((norm_pct_na * na_atwt) * na2o_molwt) / (num_na2o_cations * na_atwt)
            wt_mgo = ((norm_pct_mg * mg_atwt) * mgo_molwt) / (num_mgo_cations * mg_atwt)
            wt_sio2 = ((norm_pct_si * si_atwt) * sio2_molwt) / (num_sio2_cations * si_atwt)
            wt_tio2 = ((norm_pct_ti * ti_atwt) * tio2_molwt) / (num_tio2_cations * ti_atwt)
            wt_cr2o3 = ((norm_pct_cr * cr_atwt) * cr2o3_molwt) / (num_cr2o3_cations * cr_atwt)
            wt_nio = ((norm_pct_ni * ni_atwt) * nio_molwt) / (num_nio_cations * ni_atwt)
            sum_oxwts = (wt_feo + wt_cao + wt_al2o3 + wt_na2o + wt_mgo + wt_sio2 + wt_tio2 + wt_cr2o3 + wt_nio)
            # print("Wt Oxides:")
            # print(wt_feo, wt_cao, wt_al2o3, wt_na2o, wt_mgo, wt_sio2, wt_tio2, wt_cr2o3, wt_nio, sum_oxwts)
            norm_wt_feo = (wt_feo / sum_oxwts) * 100.0
            norm_wt_cao = (wt_cao / sum_oxwts) * 100.0
            norm_wt_al2o3 = (wt_al2o3 / sum_oxwts) * 100.0
            norm_wt_na2o = (wt_na2o / sum_oxwts) * 100.0
            norm_wt_mgo = (wt_mgo / sum_oxwts) * 100.0
            norm_wt_sio2 = (wt_sio2 / sum_oxwts) * 100.0
            norm_wt_tio2 = (wt_tio2 / sum_oxwts) * 100.0
            norm_wt_cr2o3 = (wt_cr2o3 / sum_oxwts) * 100.0
            norm_wt_nio = (wt_nio / sum_oxwts) * 100.0
            norm_wt_sum_check = (norm_wt_feo + norm_wt_cao + norm_wt_al2o3 + norm_wt_na2o + norm_wt_mgo +
                                 norm_wt_sio2 + norm_wt_tio2 + norm_wt_cr2o3 + norm_wt_nio)
            # print(star_name)
            # print(norm_wt_feo, norm_wt_cao, norm_wt_al2o3, norm_wt_na2o, norm_wt_mgo, norm_wt_sio2,
            #       norm_wt_tio2, norm_wt_cr2o3, norm_wt_nio, norm_wt_sum_check)
            # (the original check below omitted .format(infile_type), so it never matched)
            if (star_name + "_MELTS_{}_INFILE.txt".format(infile_type)) in os.listdir(os.getcwd()):
                os.remove(star_name + "_MELTS_{}_INFILE.txt".format(infile_type))
            else:
                pass
            melts_input_file = open(star_name + "_MELTS_{}_INFILE.txt".format(infile_type), 'w')
            title = "Title: {}".format(star_name)
            initfeo = "Initial Composition: FeO {}".format(norm_wt_feo)
            initcao = "Initial Composition: CaO {}".format(norm_wt_cao)  # (the original wrote "Cao", a typo)
            inital2o3 = "Initial Composition: Al2O3 {}".format(norm_wt_al2o3)
            initna2o = "Initial Composition: Na2O {}".format(norm_wt_na2o)
            initmgo = "Initial Composition: MgO {}".format(norm_wt_mgo)
            initsio2 = "Initial Composition: SiO2 {}".format(norm_wt_sio2)
            inittio2 = "Initial Composition: TiO2 {}".format(norm_wt_tio2)
            initcr2o3 = "Initial Composition: Cr2O3 {}".format(norm_wt_cr2o3)
            initnio = "Initial Composition: NiO {}".format(norm_wt_nio)
            init_temp = 'Initial Temperature: 2000'
            final_temp = "Final Temperature: 800"
            inc_temp = "Increment Temperature: -5"
            init_press = "Initial Pressure: 500"
            final_press = "Final Pressure: 500"
            dpdt = "dp/dt: 0"
            mode = "Mode: Fractionate Solids"
            mode2 = "Mode: Isobaric"
            melts_input_file.write(
                "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(
                    title, initfeo, initcao, inital2o3, initna2o, initmgo, initsio2, inittio2, initcr2o3, initnio,
                    init_temp, final_temp, inc_temp, init_press, final_press, dpdt, mode, mode2))
            chem_outfile.write("{},{},{},{},{},{},{},{},{},{}\n".format(star_name, norm_wt_feo, norm_wt_cao,
                                                                        norm_wt_al2o3, norm_wt_na2o, norm_wt_mgo,
                                                                        norm_wt_sio2, norm_wt_tio2, norm_wt_cr2o3,
                                                                        norm_wt_nio))
            melts_input_file.close()
            shutil.move((os.getcwd() + "/" + star_name + "_MELTS_{}_INFILE.txt".format(infile_type)),
                        (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type) +
                         star_name + "_MELTS_{}_INFILE.txt".format(infile_type)))
    infiledir = os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type)
    if library is True:
        print("[~] MELTS {} Input Files Written!".format(infile_type))
        print("[~] MELTS files stored in " + (os.getcwd()))
    else:
        pass
    # print("[~] Launching alphaMELTS for {} Calculations...".format(infile_type))
    infiledir = (os.getcwd() + "/{}_MELTS_{}_Input_Files/".format(inputfile_list[0][:-4], infile_type))
    print("[~] Launching alphaMELTS for {} Calculations...".format(infile_type))
    runmelts_bsp(infile_directory=infiledir, inputfilename=infile)
    chem_outfile.close()
    if consol_file is True:
        file_consolidate(path=infiledir, init_path=init_path)
    else:
        file_consolidate(path=infiledir, init_path=init_path)
        scrapebsp2(infiledirectory=(home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(infile[:-4])),
                   inputfilename=infile)
        bsprecalc(bspmeltsfilesdir=(home_dir[0] + "/{}_Completed_BSP_MELTS_Files".format(infile[:-4])),
                  infilename=infile, alloy_mass_infile="alloy_mass.csv",
                  bsp_chem_infile="{}_{}_ConsolidatedChemFile.csv".format(infile[:-4], infile_type))
    # except:
    #     raise Exception
    #     # print("\nError! There is likely an issue with the formatting of your input file!\n"
    #     #       "Please refer to the documentation for more information.\n")
    #     time.sleep(8)
    #     initialization()
    #     # sys.exit()


def bsprecalc(bspmeltsfilesdir, infilename, alloy_mass_infile, bsp_chem_infile):
    if "{}_BSP_Composition.csv".format(infilename[:-4]) in os.listdir(home_dir[0]):
        os.remove(home_dir[0] + "/{}_BSP_Composition.csv".format(infilename[:-4]))
    bsp_chemfile = open("{}_BSP_Composition.csv".format(infilename[:-4]), 'a')
    bsp_comp_header = "Star,FeO,Na2O,MgO,Al2O3,SiO2,CaO,TiO2,Cr2O3"
    bsp_chemfile.write("{}\n".format(bsp_comp_header))
    if "bsp_debug.csv" in os.listdir(os.getcwd()):
        os.remove("bsp_debug.csv")
    bsp_debug = open("bsp_debug.csv", 'a')
    if os.path.exists(home_dir[0] + "/MELTS_MORB_Input_Files"):
        shutil.rmtree(home_dir[0] + "/MELTS_MORB_Input_Files")
    else:
        pass
    os.mkdir(home_dir[0] + "/MELTS_MORB_Input_Files")
    # need to build in the MELTS file parser to extract alloy info
    # construct it so that it extracts alloy and chemistry, and then write to file with predictable headers
    # for i in os.listdir(os.getcwd()):
    df_chem = pd.read_csv(bsp_chem_infile)
    df_alloy = pd.read_csv(alloy_mass_infile)
    for row in df_chem.index:
        try:
            # print(df_chem)
            # print(df_chem.index)
            star_name = df_chem['Star'][row]
            feo_in = df_chem['FeO'][row]
            na2o_in = df_chem['Na2O'][row]
            mgo_in = df_chem['MgO'][row]
            al2o3_in = df_chem['Al2O3'][row]
            sio2_in = df_chem['SiO2'][row]
            cao_in = df_chem['CaO'][row]
            nio_in = df_chem['NiO'][row]
            tio2_in = df_chem['TiO2'][row]
            cr2o3_in = df_chem['Cr2O3'][row]
            in1_header = "1,feo,na2o,mgo,al2o3,sio2,cao,nio,tio2,cr2o3"
            in1 = ",{},{},{},{},{},{},{},{},{}".format(feo_in, na2o_in, mgo_in, al2o3_in, sio2_in, cao_in,
                                                       nio_in, tio2_in, cr2o3_in)
            bsp_debug.write("{}\n{}\n".format(in1_header, in1))
            for row2 in df_alloy.index:  # (renamed from 'row' to stop shadowing the outer loop variable)
                star_name2 = df_alloy['star'][row2]
                alloy_mass = df_alloy['alloy mass'][row2]
                if star_name == star_name2:
                    feo_moles = feo_in / feo_molwt
                    na2o_moles = na2o_in / na2o_molwt
                    mgo_moles = mgo_in / mgo_molwt
                    al2o3_moles = al2o3_in / al2o3_molwt
                    sio2_moles = sio2_in / sio2_molwt
                    cao_moles = cao_in / cao_molwt
                    nio_moles = nio_in / nio_molwt
                    tio2_moles = tio2_in / tio2_molwt
                    cr2o3_moles = cr2o3_in / cr2o3_molwt
                    in2_header = "2,feo,na2o,mgo,al2o3,sio2,cao,nio,tio2,cr2o3"
                    in2 = ",{},{},{},{},{},{},{},{},{}".format(feo_moles, na2o_moles, mgo_moles, al2o3_moles,
                                                               sio2_moles, cao_moles, nio_moles, tio2_moles,
                                                               cr2o3_moles)
                    bsp_debug.write("{}\n{}\n".format(in2_header, in2))
                    fe_moles = feo_moles * num_feo_cations
                    na_moles = na2o_moles * num_na2o_cations
                    mg_moles = mgo_moles * num_mgo_cations
                    al_moles = al2o3_moles * num_al2o3_cations
                    si_moles = sio2_moles * num_sio2_cations
                    ca_moles = cao_moles * num_cao_cations
                    ni_moles = nio_moles * num_nio_cations
                    ti_moles = tio2_moles * num_tio2_cations
                    cr_moles = cr2o3_moles * num_cr2o3_cations
                    in3_header = "3,fe,na,mg,al,si,ca,ni,ti,cr"
                    in3 = ",{},{},{},{},{},{},{},{},{}".format(fe_moles, na_moles, mg_moles, al_moles, si_moles,
                                                               ca_moles, ni_moles, ti_moles, cr_moles)
                    bsp_debug.write("{}\n{}\n".format(in3_header, in3))
                    fe_mass = fe_moles * fe_atwt
                    na_mass = na_moles * na_atwt
                    mg_mass = mg_moles * mg_atwt
                    al_mass = al_moles * al_atwt
                    si_mass = si_moles * si_atwt
                    ca_mass = ca_moles * ca_atwt
                    ni_mass = ni_moles * ni_atwt
                    ti_mass = ti_moles * ti_atwt
                    cr_mass = cr_moles * cr_atwt
                    in4_header = "4,fe,na,mg,al,si,ca,ni,ti,cr"
                    in4 = ",{},{},{},{},{},{},{},{},{}".format(fe_mass, na_mass, mg_mass, al_mass, si_mass,
                                                               ca_mass, ni_mass, ti_mass, cr_mass)
                    bsp_debug.write("{}\n{}\n".format(in4_header, in4))
                    alloy_subt_ni_mass = alloy_mass - ni_mass
                    if alloy_subt_ni_mass < 0:
                        print("Ni MASS ERROR!")
                        sys.exit()
                    else:
                        pass
                    new_mass_fe = fe_mass - alloy_subt_ni_mass
                    if new_mass_fe < 0:
                        print("Fe MASS ERROR!")
                        sys.exit()
                    remaining_moles_fe = new_mass_fe / fe_atwt
                    remaining_moles_feo = remaining_moles_fe * num_feo_cations
                    remaining_mass_feo = remaining_moles_feo * feo_molwt
                    in5_header = "5,alloy_subt_ni_mass,new_mass_fe,remaining_moles_fe,remaining_moles_feo," \
                                 "remaining_mass_feo"
                    in5 = ",{},{},{},{},{}".format(alloy_subt_ni_mass, new_mass_fe, remaining_moles_fe,
                                                   remaining_moles_feo, remaining_mass_feo)
                    bsp_debug.write("{}\n{}\n".format(in5_header, in5))
                    unnormalized_sum = (remaining_mass_feo + na2o_in + mgo_in + al2o3_in + sio2_in + cao_in +
                                        tio2_in + cr2o3_in)
                    norm_feo = remaining_mass_feo / unnormalized_sum * 100.0
                    norm_na2o = na2o_in / unnormalized_sum * 100.0
                    norm_mgo = mgo_in / unnormalized_sum * 100.0
                    norm_al2o3 = al2o3_in / unnormalized_sum * 100.0
                    norm_sio2 = sio2_in / unnormalized_sum * 100.0
                    norm_cao = cao_in / unnormalized_sum * 100.0
                    norm_tio2 = tio2_in / unnormalized_sum * 100.0
                    norm_cr2o3 = cr2o3_in / unnormalized_sum * 100.0
                    norm_sum = norm_feo + norm_na2o + norm_mgo + norm_al2o3 + norm_sio2 + norm_cao + norm_tio2 + \
                        norm_cr2o3
                    in6_header = "6,feo,na2o,mgo,al2o3,sio2,cao,tio2,cr2o3,unnorm_sum,norm_sum"
                    in6 = ",{},{},{},{},{},{},{},{},{},{}".format(norm_feo, norm_na2o, norm_mgo, norm_al2o3,
                                                                  norm_sio2, norm_cao, norm_tio2, norm_cr2o3,
                                                                  unnormalized_sum, norm_sum)
                    bsp_debug.write("{}\n{}\n".format(in6_header, in6))
                    bsp_comp = "{},{},{},{},{},{},{},{},{}".format(star_name, norm_feo, norm_na2o, norm_mgo,
                                                                   norm_al2o3, norm_sio2, norm_cao, norm_tio2,
                                                                   norm_cr2o3)
                    bsp_chemfile.write("{}\n".format(bsp_comp))
                    # print(norm_feo)
                    # print(norm_sum)
                    # if norm_sum != 100.0:
                    #     print("ERROR! NORMALIZED SUM IS NOT 100.0!")
                    #     sys.exit()
                    title = "Title: {}".format(star_name)
                    bsp_feo = "Initial Composition: FeO {}".format(norm_feo)
                    bsp_na2o = "Initial Composition: Na2O {}".format(norm_na2o)
                    bsp_mgo = "Initial Composition: MgO {}".format(norm_mgo)
                    bsp_al2o3 = "Initial Composition: Al2O3 {}".format(norm_al2o3)
                    bsp_sio2 = "Initial Composition: SiO2 {}".format(norm_sio2)
                    bsp_cao = "Initial Composition: CaO {}".format(norm_cao)
                    bsp_tio2 = "Initial Composition: TiO2 {}".format(norm_tio2)
                    bsp_cr2o3 = "Initial Composition: Cr2O3 {}".format(norm_cr2o3)
                    init_temp = 'Initial Temperature: 2000'
                    final_temp = "Final Temperature: 800"
                    inc_temp = "Increment Temperature: -5"
                    init_press = "Initial Pressure: 10000"
                    final_press = "Final Pressure: 10000"
                    dpdt = "dp/dt: 0"
                    mode = "Mode: Fractionate Solids"
                    mode2 = "Mode: Isobaric"
                    # (the original format string had 18 slots and passed init_temp twice,
                    # duplicating the temperature line in every MORB input file)
                    melts_morb_input_file_vars = \
                        "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
                            title, bsp_feo, bsp_na2o, bsp_mgo, bsp_al2o3, bsp_sio2, bsp_cao, bsp_tio2, bsp_cr2o3,
                            init_temp, final_temp, inc_temp, init_press, final_press, dpdt, mode, mode2)
                    morb_outfile = open("{}_MELTS_{}_INFILE.txt".format(star_name, "MORB"), 'w')
                    morb_outfile.write(melts_morb_input_file_vars)
                    morb_outfile.close()
                    fdir = os.getcwd() + "/{}_MELTS_{}_INFILE.txt".format(star_name, "MORB")
                    tdir = home_dir[0] + "/MELTS_MORB_Input_Files/{}_MELTS_{}_INFILE.txt".format(star_name, "MORB")
                    shutil.move(fdir, tdir)
        except:
            pass
    bsp_debug.close()
    bsp_chemfile.close()
    hefestofilewriter_bsp(bulkfile=(home_dir[0] + "/{}_BSP_Composition.csv".format(infilename[:-4])),
                          infilename=infilename)
    runmelts_morb(infile_directory=(home_dir[0] + "/MELTS_MORB_Input_Files"), inputfilename=infilename[:-4])
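# ---------------------------------------------------------------------------
# Illustrative sketch (never called by the pipeline): the alloy-mass correction
# bsprecalc() applies above. The Ni mass is removed from the alloy mass first;
# the remainder is subtracted from Fe, and the leftover Fe is recast as FeO for
# renormalization. The argument values are made up.
def _demo_alloy_fe_correction(fe_mass=20.0, ni_mass=1.5, alloy_mass=5.0):
    alloy_subt_ni_mass = alloy_mass - ni_mass    # alloy mass not accounted for by Ni
    new_mass_fe = fe_mass - alloy_subt_ni_mass   # Fe left in the silicate after alloy extraction
    remaining_moles_feo = (new_mass_fe / fe_atwt) * num_feo_cations
    return remaining_moles_feo * feo_molwt       # FeO mass to renormalize with the other oxides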
def runmelts_morb(infile_directory, inputfilename):
    if "{}_Completed_MORB_MELTS_Files".format(inputfilename) in os.listdir(os.getcwd()):
        shutil.rmtree("{}_Completed_MORB_MELTS_Files".format(inputfilename))
        os.mkdir("{}_Completed_MORB_MELTS_Files".format(inputfilename))
    else:
        os.mkdir("{}_Completed_MORB_MELTS_Files".format(inputfilename))
    for i in os.listdir(infile_directory):
        os.chdir(home_dir[0])
        if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
            os.remove("alphaMELTS_tbl.txt")
        else:
            pass
        shutil.copy((infile_directory + "/" + i), (home_dir[0] + "/" + i))
        print("[~] Running MORB calculations for: {}".format(i[:-20]))
        p = subprocess.Popen(["run_alphamelts.command", "-f", "MORB_Env_File"], stdin=subprocess.PIPE)
        t = Timer(300, p.kill)
        t.start()
        print("\nTimeout timer started. 300 seconds until the loop continues...\n")
        p.communicate(input=b"\n".join(
            [b"1", i, b"8", b"alloy-liquid", b"0", b"x", b"5", b"3", b"+0.4", b"2", b"1400", b"10000", b"10",
             b"1", b"3", b"1", b"liquid", b"1", b"0.05", b"0", b"10", b"0", b"4", b"0"]))
        t.cancel()
        if "alphaMELTS_tbl.txt" in os.listdir(os.getcwd()):
            oldname = "alphaMELTS_tbl.txt"
            newname = i[:-20] + "_MORB_OUTPUT"
            os.rename(oldname, newname)
            shutil.move(newname, home_dir[0] + "/{}_Completed_MORB_MELTS_Files".format(inputfilename))
            os.remove(i)
            os.chdir(home_dir[0] + "/{}_Completed_MORB_MELTS_Files".format(inputfilename))
            csv_file_name = newname + ".csv"
            with open(newname, 'rb') as infile, open(csv_file_name, 'wb') as outfile:
                in_txt = csv.reader(infile, delimiter=" ")
                out_csv = csv.writer(outfile)
                out_csv.writerows(in_txt)
            infile.close()
            outfile.close()
            os.remove(newname)
            print("[~] {} MORB calculation processed!".format(i[:-17]))
        else:
            print("[X] {} MORB calculation FAILED!".format(i[:-20]))
            pass
        if i in home_dir[0]:
            os.remove(home_dir[0] + "/{}".format(i))
        else:
            pass
    scrapemorb(infiledirectory=(home_dir[0] + "/{}_Completed_MORB_MELTS_Files".format(inputfilename)),
               infilename=inputfilename)


def scrapebsp2(infiledirectory, inputfilename):
    if "alloy_mass.csv" in os.listdir(home_dir[0]):
        os.remove(home_dir[0] + "/alloy_mass.csv")
    else:
        pass
    alloy_mass_outfile = open(home_dir[0] + "/alloy_mass.csv", 'a')
    alloy_mass_outfile.write("{},{}\n".format("star", "alloy mass"))
    os.chdir(infiledirectory)
    for i in os.listdir(os.getcwd()):
        try:
            os.chdir(infiledirectory)
            # The original guard was `if enumerate(i, 1) >= 100:`, which compares an enumerate
            # object with an int (effectively always True under Python 2). A minimum file size
            # is assumed here as the intent, to skip empty or failed MELTS outputs.
            if os.path.getsize(i) >= 100:
                alloy_abundance = []
                with open(i, 'r') as infile:
                    reader = csv.reader(infile)
                    row1 = next(reader)
                    star_name = row1[1]
                    alloy_abundance.append(star_name)
                    for num, line in enumerate(reader, 1):
                        if "Phase" in line:
                            csv_list = list(reader)
                            alloy_index = csv_list[0].index("alloy-solid_0")
                            for row in csv_list[1:]:
                                if not row == []:
                                    a = row[alloy_index]
                                    x = str(float(a))
                                    alloy_abundance.append(x)
                                else:
                                    break
                        else:
                            pass
                os.chdir(home_dir[0])
                # print(alloy_abundance[1:])
                alloy_abundance_nums = []
                for z in alloy_abundance[1:]:
                    alloy_abundance_nums.append(float(z))
                alloy_abundance_sum = sum(alloy_abundance_nums)
                print("Alloy abundance for {}: {}".format(alloy_abundance[0], alloy_abundance_sum))
                alloy_mass_outfile.write("{},{}\n".format(alloy_abundance[0], alloy_abundance_sum))
            else:
                pass
        except:
            pass
    alloy_mass_outfile.close()  # note: absent in the original; needed so buffered rows reach disk
                                # before bsprecalc() reads alloy_mass.csv


def hefestofilewriter_bsp(bulkfile, infilename):
    os.chdir(home_dir[0])
    infilename = infilename[:-4]
    if os.path.exists("{}_BSP_HeFESTo_Input_Files".format(infilename)):
        shutil.rmtree("{}_BSP_HeFESTo_Input_Files".format(infilename))
    else:
        pass
    os.mkdir("{}_BSP_HeFESTo_Input_Files".format(infilename))
    bulkfile_df = pd.read_csv(bulkfile)
    for row in bulkfile_df.index:
        try:
            star = bulkfile_df["Star"][row]
            si = bulkfile_df["SiO2"][row]
            mg = bulkfile_df["MgO"][row]
            fe = bulkfile_df["FeO"][row]
            ca = bulkfile_df["CaO"][row]
            al = bulkfile_df["Al2O3"][row]
            na = bulkfile_df["Na2O"][row]
            hefesto_bsp_file = open("{}_BSP_HeFESTo_Infile.txt".format(star), 'a')
            format_of_file = "0,20,80,1600,0,-2,0\n6,2,4,2\noxides\nSi {} 5.39386 0\nMg {} 2.71075 0\n" \
                             "Fe {} .79840 0\nCa {} .31431 0\nAl {} .96680 0\n" \
                             "Na {} .40654 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\nhc\n" \
                             "phase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\njd\n" \
                             "phase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\nmgwa\nfewa\n" \
                             "phase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\nphase ppv\n0\nmppv\n" \
                             "fppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n1\nqtz\nphase coes\n0\ncoes\n" \
                             "phase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n0\nneph".format(si, mg, fe, ca, al, na)
            hefesto_bsp_file.write(format_of_file)
            hefesto_bsp_file.close()
            fdir = home_dir[0] + "/{}".format("{}_BSP_HeFESTo_Infile.txt".format(star))
            tdir = home_dir[0] + "/{}/{}".format("{}_BSP_HeFESTo_Input_Files".format(infilename),
                                                 "{}_BSP_HeFESTo_Infile.txt".format(star))
            shutil.move(fdir, tdir)
        except:
            pass
    print("\n[~] BSP HeFESTo input files available in '{}'".format("{}_BSP_HeFESTo_Input_Files".format(infilename)))


def hefestofilewriter_morb(bulkfile, infilename):
    os.chdir(home_dir[0])
    if os.path.exists("{}_MORB_HeFESTo_Input_Files".format(infilename)):
        shutil.rmtree("{}_MORB_HeFESTo_Input_Files".format(infilename))
    else:
        pass
    os.mkdir("{}_MORB_HeFESTo_Input_Files".format(infilename))
    bulkfile_df = pd.read_csv(bulkfile)
    for row in bulkfile_df.index:
        try:
            star = bulkfile_df["Star"][row]
            si = bulkfile_df["SiO2"][row]
            mg = bulkfile_df["MgO"][row]
            fe = bulkfile_df["FeO"][row]
            ca = bulkfile_df["CaO"][row]
            al = bulkfile_df["Al2O3"][row]
            na = bulkfile_df["Na2O"][row]
            hefesto_morb_file = open("{}_MORB_HeFESTo_Infile.txt".format(star), 'a')
            format_of_file = "0,20,80,1200,0,-2,0\n6,2,4,2\noxides\nSi {} 5.33159 0\n" \
                             "Mg {} 1.37685 0\nFe {} .55527 0\n" \
                             "Ca {} 1.33440 0\nAl {} 1.82602 0\n" \
                             "Na {} 0.71860 0\n1,1,1\ninv251010\n47\nphase plg\n1\nan\nab\nphase sp\n0\nsp\n" \
                             "hc\nphase opx\n1\nen\nfs\nmgts\nodi\nphase c2c\n0\nmgc2\nfec2\nphase cpx\n1\ndi\nhe\ncen\ncats\n" \
                             "jd\nphase gt\n0\npy\nal\ngr\nmgmj\njdmj\nphase cpv\n0\ncapv\nphase ol\n1\nfo\nfa\nphase wa\n0\n" \
                             "mgwa\nfewa\nphase ri\n0\nmgri\nferi\nphase il\n0\nmgil\nfeil\nco\nphase pv\n0\nmgpv\nfepv\nalpv\n" \
                             "phase ppv\n0\nmppv\nfppv\nappv\nphase cf\n0\nmgcf\nfecf\nnacf\nphase mw\n0\npe\nwu\nphase qtz\n" \
                             "1\nqtz\nphase coes\n0\ncoes\nphase st\n0\nst\nphase apbo\n0\napbo\nphase ky\n0\nky\nphase neph\n" \
                             "0\nneph".format(si, mg, fe, ca, al, na)
            hefesto_morb_file.write(format_of_file)
            hefesto_morb_file.close()
            fdir = home_dir[0] + "/{}".format("{}_MORB_HeFESTo_Infile.txt".format(star))
            tdir = home_dir[0] + "/{}/{}".format("{}_MORB_HeFESTo_Input_Files".format(infilename),
                                                 "{}_MORB_HeFESTo_Infile.txt".format(star))
            shutil.move(fdir, tdir)
        except:
            pass
    print("\n[~] Crust HeFESTo input files available in '{}'".format(
        "{}_MORB_HeFESTo_Input_Files".format(infilename)))
    consol_hefestofolders(infilename=infilename)


def consol_hefestofolders(infilename):
    print('\n[~] Consolidating HeFESTo input file folders...')
    bsp_folder = "/{}_BSP_HeFESTo_Input_Files".format(infilename)
    morb_folder = "/{}_MORB_HeFESTo_Input_Files".format(infilename)
    print("[~] Got HeFESTo BSP folder '{}'".format(bsp_folder))
    print("[~] Got HeFESTo Crust folder '{}'".format(morb_folder))
    if "{}_HeFESTo_Input_Files".format(infilename) in os.listdir(os.getcwd()):
        shutil.rmtree("{}_HeFESTo_Input_Files".format(infilename))
    else:
        pass
    consol_folder = (home_dir[0] + "/{}_HeFESTo_Input_Files".format(infilename))
    os.mkdir(consol_folder)  # note: apparently missing in the original; the moves below require it
    print("\n[~] Created consolidated HeFESTo input file folder: {}".format(consol_folder))
    fdir_bsp = (home_dir[0] + bsp_folder)
    fdir_morb = (home_dir[0] + morb_folder)
    tdir_bsp = consol_folder + bsp_folder
    tdir_morb = consol_folder + morb_folder
    shutil.move(fdir_bsp, tdir_bsp)
    shutil.move(fdir_morb, tdir_morb)
    print("\n[~] HeFESTo Input files are now available in {} for transfer to a HeFESTo VM".format(consol_folder))
    print("\n[~] Please move this script and folder '{}' to a working HeFESTo directory!".format(consol_folder))
    print("[~] Exiting the Exoplanet Pocketknife's active processes...")
    time.sleep(6)
    initialization()


def runhefesto(infiledir, actual_run, runname):
    os.chdir(home_dir[0])
    if actual_run is True:
        # try:
        if 'main' not in os.listdir(os.getcwd()):
            print("[X] ERROR! HeFESTo's 'main' not detected in the working directory!\n")
            time.sleep(4)
            initialization()
        else:
            print("[~] HeFESTo detected in the working directory!\n")
            pass
        # os.chdir(home_dir[0])
        # print("\nPlease enter the name of your BSP HeFESTo input .csv sheet:")
        # hefesto_input_bsp = input(">>> ")
        # if hefesto_input_bsp in os.listdir(os.getcwd()):
        #     print("[~] {} has been found in the working directory!".format(hefesto_input_bsp))
        # else:
        #     print("[X] {} has NOT been found in the working directory!".format(hefesto_input_bsp))
        #     time.sleep(4)
        #     initialization()
        # print("\nPlease enter the name of your crust HeFESTo input .csv sheet:")
        # hefesto_input_morb = input(">>> ")
        # if hefesto_input_morb in os.listdir(os.getcwd()):
        #     print("[~] {} has been found in the working directory!".format(hefesto_input_morb))
        # else:
        #     print("[X] {} has NOT been found in the working directory!".format(hefesto_input_morb))
        #     time.sleep(4)
        #     initialization()
        #
        # if os.path.exists("HeFESTo_BSP_Input_Files"):
        #     shutil.rmtree("HeFESTo_BSP_Input_Files")
        # else:
        #     pass
        # if os.path.exists("HeFESTo_MORB_Input_Files"):
        #     shutil.rmtree("HeFESTo_MORB_Input_Files")
        # else:
        #     pass
        #
        # os.mkdir("HeFESTo_BSP_Input_Files")
        if os.path.exists(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname)):
            shutil.rmtree(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname))
        else:
            pass
        if os.path.exists(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname)):
            shutil.rmtree(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname))
        else:
            pass
        os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname))
        os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.66".format(runname))
        os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.58".format(runname))
        os.mkdir(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.59".format(runname))
        os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname))
        os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.66".format(runname))
        os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.58".format(runname))
        os.mkdir(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.59".format(runname))
        bsp_dir = []
        morb_dir = []
        os.chdir(infiledir)
        for i in os.listdir(os.getcwd()):
            if "BSP" in i or "bsp" in i:
                print("[~] Found BSP directory: {}".format(i))
                bsp_dir.append(i)
            elif "MORB" in i or "morb" in i:
                print("[~] Found MORB directory: {}".format(i))
                morb_dir.append(i)
            # else:
            #     print("\n[X] HeFESTo cumulative input directory not properly formatted!")
            #     initialization()
        if len(bsp_dir) > 1 or len(morb_dir) > 1:
            print("\n[X] HeFESTo cumulative input directory not properly formatted!")
            time.sleep(2)
            initialization()
        bsp_dir = home_dir[0] + "/{}/{}".format(infiledir, bsp_dir[0])
        morb_dir = home_dir[0] + "/{}/{}".format(infiledir, morb_dir[0])
        print("\n[~] Initiating HeFESTo BSP calculations...")  # (the original printed "\b", an apparent typo)
        for i in os.listdir(bsp_dir):
            star_name = i[:-23]
            os.chdir(home_dir[0])
            if "fort.66" in os.listdir(os.getcwd()):
                try:
                    os.remove("fort.66")
                except:
                    pass
                try:
                    shutil.rmtree("fort.66")
                except:
                    pass
            else:
                pass
            if "fort.58" in os.listdir(os.getcwd()):
                try:
                    os.remove("fort.58")
                except:
                    pass
                try:
                    shutil.rmtree("fort.58")
                except:
                    pass
            else:
                pass
            if "fort.59" in os.listdir(os.getcwd()):
                try:
                    os.remove("fort.59")
                except:
                    pass
                try:
                    shutil.rmtree("fort.59")
                except:
                    pass
            else:
                pass
            if "control" in os.listdir(os.getcwd()):
                try:
                    os.remove("control")
                except:
                    pass
                try:
                    shutil.rmtree("control")
                except:
                    pass
            else:
                pass
            os.chdir(bsp_dir)
            shutil.copy((bsp_dir + "/{}".format(i)), (home_dir[0] + "/{}".format("control")))
            print("\n[~] Performing HeFESTo BSP calculations on: {}".format(i))
            os.chdir(home_dir[0])
            argz = (home_dir[0] + "/main")
            p = subprocess.Popen(argz, stdin=None, stdout=None)
            t = Timer(800, p.kill)
            t.start()
            p.communicate()
            t.cancel()
            if "fort.66" in os.listdir(os.getcwd()):
                print("\n[~] 'fort.66' found!")
                shutil.move("fort.66", (home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.66/{}".format(
                    runname, star_name + "_fort66")))
            if "fort.58" in os.listdir(os.getcwd()):
                print("\n[~] 'fort.58' found!")
                shutil.move("fort.58", (home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.58/{}".format(
                    runname, star_name + "_fort58")))
            if "fort.59" in os.listdir(os.getcwd()):
                print("\n[~] 'fort.59' found!")
                shutil.move("fort.59", (home_dir[0] + "/{}_HeFESTo_BSP_Output_Files/fort.59/{}".format(
                    runname, star_name + "_fort59")))
            if "control" in os.listdir(os.getcwd()):
                os.remove("control")
            time.sleep(2)
        print("\n[~] Initiating HeFESTo crust calculations...")  # (the original printed "\b", an apparent typo)
        for i in os.listdir(morb_dir):
            star_name = i[:-24]
            os.chdir(home_dir[0])
            if "fort.66" in os.listdir(home_dir[0]):
                os.remove(home_dir[0] + "/fort.66")
            if "fort.58" in os.listdir(home_dir[0]):
                os.remove(home_dir[0] + "/fort.58")
            if "fort.59" in os.listdir(home_dir[0]):
                os.remove(home_dir[0] + "/fort.59")
            if "control" in os.listdir(home_dir[0]):
                os.remove(home_dir[0] + "/control")
            os.chdir(morb_dir)
            shutil.copy((morb_dir + "/{}".format(i)), (home_dir[0] + "/{}".format("control")))
            print("\n[~] Performing HeFESTo crust calculations on: {}".format(i))
            os.chdir(home_dir[0])
            argz = (home_dir[0] + "/main")
            p = subprocess.Popen(argz, stdin=None, stdout=None)
            t = Timer(800, p.kill)
            t.start()
            p.communicate()
            t.cancel()
            try:
                if "fort.66" in os.listdir(home_dir[0]):
                    print("\n[~] 'fort.66' found!")
                    shutil.move(home_dir[0] + "/fort.66",
                                (home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.66/{}".format(
                                    runname, star_name + "_fort66")))
                if "fort.58" in os.listdir(home_dir[0]):
                    print("\n[~] 'fort.58' found!")
                    shutil.move(home_dir[0] + "/fort.58",
                                (home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.58/{}".format(
                                    runname, star_name + "_fort58")))
                if "fort.59" in os.listdir(home_dir[0]):
                    print("\n[~] 'fort.59' found!")
                    shutil.move(home_dir[0] + "/fort.59",
                                (home_dir[0] + "/{}_HeFESTo_MORB_Output_Files/fort.59/{}".format(
                                    runname, star_name + "_fort59")))
                if "control" in os.listdir(home_dir[0]):
                    os.remove(home_dir[0] + "/control")
            except:
                pass
        os.chdir(home_dir[0])
        if "fort.66" in os.listdir(os.getcwd()):
            os.remove("fort.66")
        if "fort.58" in os.listdir(os.getcwd()):
            os.remove("fort.58")
        if "fort.59" in os.listdir(os.getcwd()):
            os.remove("fort.59")  # (the original checked "fort.66" and removed "fort.69", an apparent typo)
        if "control" in os.listdir(os.getcwd()):
            os.remove("control")
        if os.path.exists("{}_HeFESTo_Output_Files".format(runname)):
            shutil.rmtree("{}_HeFESTo_Output_Files".format(runname))
        os.mkdir("{}_HeFESTo_Output_Files".format(runname))
        shutil.move(home_dir[0] + "/{}_HeFESTo_BSP_Output_Files".format(runname),
                    home_dir[0] + "/{}_HeFESTo_Output_Files".format(runname))
        shutil.move(home_dir[0] + "/{}_HeFESTo_MORB_Output_Files".format(runname),
                    home_dir[0] + "/{}_HeFESTo_Output_Files".format(runname))
        print("\n[~] HeFESTo Output Files available at '{}'".format(
            home_dir[0] + "/{}_HeFESTo_Output_Files".format(runname)))
        print("\n[~] Finished with HeFESTo calculations!")
        # bsp_infile_init = (home_dir[0] + "/{}".format(hefesto_input_bsp))
        # bsp_infile_to = (home_dir[0] + "/HeFESTo_BSP_Input_Files/{}".format(hefesto_input_bsp))
        # morb_infile_init = (home_dir[0] + "/{}".format(hefesto_input_morb))
        # morb_infile_to = (home_dir[0] + "/HeFESTo_MORB_Input_Files/{}".format(hefesto_input_morb))
        # shutil.copy(bsp_infile_init, bsp_infile_to)
        # shutil.copy(morb_infile_init, morb_infile_to)
        # os.chdir(bsp_dir)
        # with open(hefesto_input_bsp, 'r') as infile:
        #     reader = csv.reader(infile, delimiter=",")
        #     for row in reader:
        #         list_formatted = []
        #         for z in row:
        #             list_formatted.append(z)
        #         title = list_formatted[0].strip()
        #         output_file = open("{}_HeFESTo_BSP_nput.txt".format(title), 'a')
        #         for z in list_formatted[1:]:
        #             output_file.write("{}\n".format(z))
        #         output_file.close()
        #
        # os.chdir(home_dir[0] + "/HeFESTo_MORB_Input_Files")
        # with open(hefesto_input_morb, 'r') as infile:
        #     reader = csv.reader(infile, delimiter=",")
        #     for row in reader:
        #         list_formatted = []
        #         for z in row:
        #             list_formatted.append(z)
        #         title = list_formatted[0].strip()
        #         output_file = open("{}_HeFESTo_MORB_Input.txt".format(title), 'a')
        #         for z in list_formatted[1:]:
        #             output_file.write("{}\n".format(z))
        #         output_file.close()
        # print("[~] HeFESTo files written!\n"
        #       "Please see {} for your files!\n".format(os.getcwd()))
        # except:
        #     pass
        # os.chdir(home_dir[0] + "/HeFESTo_BSP_Input_Files")
        # print("[~] Launching HeFESTo simulations...")
        # # curr_planet = ""
        # # for i in os.listdir(os.getcwd()):
        # #     curr_planet.update(i)
        # #     print("[~] Currently simulating BSP for: {}".format(curr_planet.get()))
    # else:
    #     try:
    #         if os.path.exists(home_dir[0] + "/HeFESTo_Inputs"):
    #             shutil.rmtree(home_dir[0] + "/HeFESTo_Inputs")
    #         else:
    #             pass
    #         os.mkdir(home_dir[0] + "/HeFESTo_Inputs")
    #         os.chdir(home_dir[0])
    #         print("\nPlease enter the name of your HeFESTo input .csv sheet:")
    #         hefesto_input = input(">>> ")
    #         if hefesto_input in os.listdir(os.getcwd()):
    #             print("[~] {} has been found in the working directory!".format(hefesto_input))
    #         else:
    #             print("[X] {} has NOT been found in the working directory!".format(hefesto_input))
    #             time.sleep(4)
    #             initialization()
    #
    #         infile_init = (home_dir[0] + "/{}".format(hefesto_input))
    #         infile_to = (home_dir[0] + "/HeFESTo_Inputs/{}".format(hefesto_input))
    #         shutil.copy(infile_init, infile_to)
    #
    #         os.chdir(home_dir[0] + "/HeFESTo_Inputs")
    #         with open(hefesto_input, 'r') as infile:
    #             reader = csv.reader(infile, delimiter=",")
    #             for row in reader:
    #                 list_formatted = []
    #                 for z in row:
    #                     list_formatted.append(z)
    #                 title = list_formatted[0].strip()
    #                 output_file = open("{}_HeFESTo_Input.txt".format(title), 'a')
    #                 for z in list_formatted[1:]:
    #                     output_file.write("{}\n".format(z))
    #                     # if z.isalpha() == True:
    #                     #     output_file.write("{}\n".format(z))
    #                     # else:
    #                     #     output_file.write("{}\n".format(z))
    #                 output_file.close()
    #         print("[~] HeFESTo files written!\n"
    #               "Please see {} for your files!\n".format(os.getcwd()))
    #     except:
    #         pass


def scrapemorb(infiledirectory, infilename):
    if "{}_MORB_Consolidated_Chem_File".format(infilename) in os.listdir(home_dir[0]):
        os.remove(home_dir[0] + "/{}_MORB_Consolidated_Chem_File".format(infilename))
    else:
        pass
    morb_outfile = open((home_dir[0] + "/{}_MORB_Consolidated_Chem_File".format(infilename)), 'a')
    # need a header
    morb_outfile_header = "Star Name,Pressure,Temperature,mass,SiO2,TiO2,Al2O3,Fe2O3,Cr2O3,FeO,MgO,CaO,Na2O\n"
    morb_outfile.write(morb_outfile_header)
    for i in os.listdir(infiledirectory):
        try:
            print("\n[~] Scraping MORB output file: {}".format(i))
            os.chdir(infiledirectory)
            with open(i, 'r') as infile:
                star_name = []
                data = []
                reader = csv.reader(infile, delimiter=',')
                reader2 = list(reader)
                star_name.append(reader2[0][1])
                # The original guard was `if enumerate(i, 1) >= 100:` (effectively always True
                # under Python 2); requiring at least 100 parsed rows is assumed to be the
                # intent, so that truncated runs fall through to the ERROR! branch.
                if len(reader2) >= 100:
                    for num, line in enumerate(reader2, 1):
                        if "Liquid" in line:
                            skip_row2 = num + 1
                            liquid_comp = reader2[skip_row2]
                            for item in liquid_comp:
                                data.append(item)
                        else:
                            pass
                    data_formatted = ",".join(str(z) for z in data)
                    os.chdir(home_dir[0])
                    morb_outfile.write("{},{}\n".format(star_name[0], data_formatted))
                else:
                    os.chdir(home_dir[0])
                    morb_outfile.write("{},ERROR!\n".format(star_name[0]))
        except:
            pass
    morb_outfile.close()
    os.chdir(home_dir[0])
    consol_file = (home_dir[0] + "/{}_MORB_Consolidated_Chem_File".format(infilename))
    morbrecalc(infiledirectory=infiledirectory, infilename=infilename, bulkfilename=consol_file)


def morbrecalc(infiledirectory, infilename, bulkfilename):
    os.chdir(home_dir[0])
    if "{}_MORB_Recalc_Bulkfile.csv".format(infilename) in os.listdir(os.getcwd()):
        os.remove("{}_MORB_Recalc_Bulkfile.csv".format(infilename))
    else:
        pass
    if "morb_debug.csv" in os.listdir(os.getcwd()):
        os.remove("morb_debug.csv")
    morb_debug = open("morb_debug.csv", 'a')
    morb_recalc_outfile = open("{}_MORB_Recalc_Bulkfile.csv".format(infilename), 'a')
    morb_recalc_outfile_header = "Star,Pressure,Temperature,Mass,SiO2,TiO2,Al2O3,Cr2O3,FeO,MgO,CaO,Na2O,SUM\n"
    morb_recalc_outfile.write(morb_recalc_outfile_header)
    df_morb_chem = pd.read_csv(bulkfilename)
    for row in df_morb_chem.index:
        try:
            star_name = df_morb_chem["Star Name"][row]
            pressure = float(df_morb_chem["Pressure"][row])
            temperature = float(df_morb_chem["Temperature"][row])
            mass = float(df_morb_chem["mass"][row])
            sio2_in = float(df_morb_chem["SiO2"][row])
            tio2_in = float(df_morb_chem["TiO2"][row])
            al2o3_in = float(df_morb_chem["Al2O3"][row])
            fe2o3_in = float(df_morb_chem["Fe2O3"][row])
            cr2o3_in = float(df_morb_chem["Cr2O3"][row])
            feo_in = float(df_morb_chem["FeO"][row])
            mgo_in = float(df_morb_chem["MgO"][row])
            cao_in = float(df_morb_chem["CaO"][row])
            na2o_in = float(df_morb_chem["Na2O"][row])
            chem_in_sum = (sio2_in + tio2_in + al2o3_in + fe2o3_in + cr2o3_in + feo_in + mgo_in + cao_in + na2o_in)
            # (duplicate 'cr2o3' removed from the original debug header)
            md1_header = "1,sio2,tio2,al2o3,fe2o3,cr2o3,feo,mgo,cao,na2o"
            md1 = ",{},{},{},{},{},{},{},{},{}".format(sio2_in, tio2_in, al2o3_in, fe2o3_in, cr2o3_in, feo_in,
                                                       mgo_in, cao_in, na2o_in)
            morb_debug.write("{}\n{}\n".format(md1_header, md1))
            wt_sio2_in = (sio2_in / 100.0) * mass
            wt_tio2_in = (tio2_in / 100.0) * mass
            wt_al2o3_in = (al2o3_in / 100.0) * mass
            wt_fe2o3_in = (fe2o3_in / 100.0) * mass
            wt_cr2o3_in = (cr2o3_in / 100.0) * mass
            wt_feo_in = (feo_in / 100.0) * mass
            wt_mgo_in = (mgo_in / 100.0) * mass
            wt_cao_in = (cao_in / 100.0) * mass
            wt_na2o_in = (na2o_in / 100.0) * mass
            sum_wt_in = (wt_sio2_in + wt_tio2_in + wt_al2o3_in + wt_fe2o3_in + wt_cr2o3_in + wt_feo_in +
                         wt_mgo_in + wt_cao_in + wt_na2o_in)
            md2_header = "2,sio2,tio2,al2o3,fe2o3,cr2o3,feo,mgo,cao,na2o"
            md2 = ",{},{},{},{},{},{},{},{},{}".format(wt_sio2_in, wt_tio2_in, wt_al2o3_in, wt_fe2o3_in,
                                                       wt_cr2o3_in, wt_feo_in, wt_mgo_in, wt_cao_in, wt_na2o_in)
            morb_debug.write("{}\n{}\n".format(md2_header, md2))
            sio2_moles = wt_sio2_in / sio2_molwt
            tio2_moles = wt_tio2_in / tio2_molwt
            al2o3_moles = wt_al2o3_in / al2o3_molwt
            fe2o3_moles = wt_fe2o3_in / fe2o3_molwt
            cr2o3_moles = wt_cr2o3_in / cr2o3_molwt
            feo_moles = wt_feo_in / feo_molwt
            mgo_moles = wt_mgo_in / mgo_molwt
            cao_moles = wt_cao_in / cao_molwt
            na2o_moles = wt_na2o_in / na2o_molwt
            sum_oxide_moles = (sio2_moles + tio2_moles + al2o3_moles + fe2o3_moles + cr2o3_moles + feo_moles +
                               mgo_moles + cao_moles + na2o_moles)
            # (missing 'cr2o3' added to the original debug header)
            md3_header = "3,sio2,tio2,al2o3,fe2o3,cr2o3,feo,mgo,cao,na2o"
            md3 = ",{},{},{},{},{},{},{},{},{}".format(sio2_moles, tio2_moles, al2o3_moles, fe2o3_moles,
                                                       cr2o3_moles, feo_moles, mgo_moles, cao_moles, na2o_moles)
            morb_debug.write("{}\n{}\n".format(md3_header, md3))
            si_cations = sio2_moles * num_sio2_cations
            ti_cations = tio2_moles * num_tio2_cations
            al_cations = al2o3_moles * num_al2o3_cations
            fe_fe2o3_cations = fe2o3_moles * num_fe2o3_cations
            cr_cations = cr2o3_moles * num_cr2o3_cations
            fe_feo_cations = feo_moles * num_feo_cations
            mg_cations = mgo_moles * num_mgo_cations
            ca_cations = cao_moles * num_cao_cations
            na_cations = na2o_moles * num_na2o_cations
            sum_cations = (si_cations + ti_cations + al_cations + fe_fe2o3_cations + cr_cations + fe_feo_cations +
                           mg_cations + ca_cations + na_cations)
            md4_header = "4,si,ti,al,fe,cr,fe,mg,ca,na,sum"
            # (the original passed na_cations twice, dropping ca_cations from the debug row)
            md4 = ",{},{},{},{},{},{},{},{},{},{}".format(si_cations, ti_cations, al_cations, fe_fe2o3_cations,
                                                          cr_cations, fe_feo_cations, mg_cations, ca_cations,
                                                          na_cations, sum_cations)
            morb_debug.write("{}\n{}\n".format(md4_header, md4))
            # fe2o3 --> feo recalc
            total_mol_fe = (fe_feo_cations + fe_fe2o3_cations)
            total_wt_fe = total_mol_fe * fe_atwt
            total_wt_feo = total_mol_fe * feo_molwt
            md5_header = "5,total_mol_fe,total_wt_fe,total_wt_feo"
            md5 = ",{},{},{}".format(total_mol_fe, total_wt_fe, total_wt_feo)
            morb_debug.write("{}\n{}\n".format(md5_header, md5))
            # unnormalized wt%
            unnorm_sum = (wt_sio2_in + wt_tio2_in + wt_al2o3_in + total_wt_feo + wt_cr2o3_in + wt_mgo_in +
                          wt_cao_in + wt_na2o_in)
            # normalized oxide wt% w/o mgo fix
            norm_wt_sio2 = wt_sio2_in / unnorm_sum
            norm_wt_tio2 = wt_tio2_in / unnorm_sum
            norm_wt_al2o3 = wt_al2o3_in / unnorm_sum
            norm_wt_feo = total_wt_feo / unnorm_sum
            norm_wt_cr2o3 = wt_cr2o3_in / unnorm_sum
            norm_wt_mgo = wt_mgo_in / unnorm_sum
            norm_wt_cao = wt_cao_in / unnorm_sum
            norm_wt_na2o = wt_na2o_in / unnorm_sum
            norm_sum_nomgofix = (norm_wt_sio2 + norm_wt_tio2 + norm_wt_al2o3 + norm_wt_feo + norm_wt_cr2o3 +
                                 norm_wt_mgo + norm_wt_cao + norm_wt_na2o)
            md6_header = "6,sio2,tio2,al2o3,feo,cr2o3,mgo,cao,na2o,sum"
            md6 = ",{},{},{},{},{},{},{},{},{}".format(norm_wt_sio2, norm_wt_tio2, norm_wt_al2o3, norm_wt_feo,
                                                       norm_wt_cr2o3, norm_wt_mgo, norm_wt_cao, norm_wt_na2o,
                                                       norm_sum_nomgofix)
            morb_debug.write("{}\n{}\n".format(md6_header, md6))
            # mgo fix
            norm_wt_mgo_fix = norm_wt_mgo * mgo_fix
            norm_sum_mgofix = (norm_wt_sio2 + norm_wt_tio2 + norm_wt_al2o3 + norm_wt_feo + norm_wt_cr2o3 +
                               norm_wt_mgo_fix + norm_wt_cao + norm_wt_na2o)
            md7_header = "7,mgo_fix,norm_wt_mgo_fix,norm_sum_mgofix"
            md7 = ",{},{},{}".format(mgo_fix, norm_wt_mgo_fix, norm_sum_mgofix)
            morb_debug.write("{}\n{}\n".format(md7_header, md7))
            # normalized oxide wt% abundances --- what we want!
            sio2_wtpct = (norm_wt_sio2 / norm_sum_mgofix) * 100
            tio2_wtpct = (norm_wt_tio2 / norm_sum_mgofix) * 100
            al2o3_wtpct = (norm_wt_al2o3 / norm_sum_mgofix) * 100
            feo_wtpct = (norm_wt_feo / norm_sum_mgofix) * 100
            cr2o3_wtpct = (norm_wt_cr2o3 / norm_sum_mgofix) * 100
            mgo_wtpct = (norm_wt_mgo_fix / norm_sum_mgofix) * 100
            cao_wtpct = (norm_wt_cao / norm_sum_mgofix) * 100
            na2o_wtpct = (norm_wt_na2o / norm_sum_mgofix) * 100
            sum_wtpct = (sio2_wtpct + tio2_wtpct + al2o3_wtpct + feo_wtpct + cr2o3_wtpct + mgo_wtpct + cao_wtpct +
                         na2o_wtpct)
            md8_header = "8,sio2,tio2,al2o3,feo,cr2o3,mgo,cao,na2o,sum"
            md8 = ",{},{},{},{},{},{},{},{},{}".format(sio2_wtpct, tio2_wtpct, al2o3_wtpct, feo_wtpct,
                                                       cr2o3_wtpct, mgo_wtpct, cao_wtpct, na2o_wtpct, sum_wtpct)
            morb_debug.write("{}\n{}\n".format(md8_header, md8))
            chem_to_outfile = "{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(
                star_name, pressure, temperature, mass, sio2_wtpct, tio2_wtpct, al2o3_wtpct, cr2o3_wtpct,
                feo_wtpct, mgo_wtpct, cao_wtpct, na2o_wtpct, sum_wtpct)
            morb_recalc_outfile.write(chem_to_outfile)
        except:
            pass
    morb_debug.close()
    morb_recalc_outfile.close()
    hefestofilewriter_morb(bulkfile="{}_MORB_Recalc_Bulkfile.csv".format(infilename), infilename=infilename)
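# ---------------------------------------------------------------------------
# Illustrative sketch (never called by the pipeline): the Fe2O3 -> FeO recast
# performed in morbrecalc() above. All ferric iron is folded back into FeO by
# conserving total moles of Fe cations. The argument values are made-up oxide
# masses in grams.
def _demo_fe2o3_to_feo(wt_feo_in=8.0, wt_fe2o3_in=2.0):
    fe_from_feo = (wt_feo_in / feo_molwt) * num_feo_cations
    fe_from_fe2o3 = (wt_fe2o3_in / fe2o3_molwt) * num_fe2o3_cations
    total_mol_fe = fe_from_feo + fe_from_fe2o3
    return total_mol_fe * feo_molwt  # total Fe expressed as an equivalent FeO mass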
def integrationloop2(hefestodir, runname):
    # standard_depths = []
    #
    # model_sun_bsp_rho = [3.1399, 3.16644, 3.21129, 3.21993, 3.22843, 3.23679, 3.24503, 3.25316, 3.26117, 3.26909, 3.28169, 3.29415,
    #                      3.30499, 3.31476, 3.3238, 3.33232, 3.34046, 3.34832, 3.35595, 3.3634, 3.3707, 3.37788, 3.38495, 3.39193,
    #                      3.39884, 3.40567, 3.41244, 3.41916, 3.42582, 3.43244, 3.43902, 3.44557, 3.45208, 3.45857, 3.46504, 3.47149,
    #                      3.47794, 3.48438, 3.49083, 3.4973, 3.50379, 3.51032, 3.51783, 3.52856, 3.5352, 3.54193, 3.54876, 3.55574,
    #                      3.56291, 3.57035, 3.57813, 3.58638, 3.59525, 3.60495, 3.61577, 3.69282, 3.7338, 3.74885, 3.75742, 3.76575,
    #                      3.77393, 3.78203, 3.79015, 3.79837, 3.80676, 3.81424, 3.81873, 3.82321, 3.82768, 3.83213, 3.83656, 3.84098,
    #                      3.84538, 3.84977, 3.85831, 3.87594, 3.89625, 3.90832, 3.91254, 3.91675, 3.92094]
    #
    # model_sun_crust_rho = [2.89708, 2.92792, 2.94455, 3.04297, 3.17487, 3.19574, 3.25329, 3.36196, 3.37489,
    #                        3.38665, 3.39781, 3.40855, 3.43322, 3.4435, 3.45364, 3.46287, 3.47109, 3.47896, 3.4865,
    #                        3.49376, 3.50079, 3.50761, 3.51426, 3.52077, 3.52715, 3.53344, 3.53963, 3.54574, 3.55179,
    #                        3.55777, 3.56371, 3.5696, 3.57545, 3.58126, 3.58704, 3.59279, 3.66547, 3.67112, 3.67676,
    #                        3.68238, 3.68799, 3.69359, 3.69919, 3.70479, 3.71039, 3.71601, 3.72163, 3.72728, 3.73294,
    #                        3.73864, 3.74438, 3.75015, 3.75598, 3.76188, 3.76784, 3.77389, 3.78003, 3.78629, 3.79267,
    #                        3.79921, 3.80591, 3.8128, 3.81991, 3.82728, 3.83492, 3.84288, 3.85119, 3.85991, 3.86906,
    #                        3.8787, 3.88887, 3.89961, 3.91094, 3.9229, 3.9355, 3.94971, 3.97115, 3.99127, 4.01053,
    #                        4.02931, 4.04793]
    #
    # model_sun_delta_rho = [a - b for a, b in zip(model_sun_crust_rho, model_sun_bsp_rho)]
    #
    # lit_sun_bsp_rho = []
    #
    # lit_sun_crust_rho = [2.96748, 2.98934, 3.02871, 3.12504, 3.2649, 3.32414, 3.40401, 3.41811, 3.43281, 3.44608,
    #                      3.45855, 3.47031, 3.5037, 3.51281, 3.52141, 3.52955, 3.5373, 3.54472, 3.55187, 3.55881, 3.56557,
    #                      3.57218, 3.57866, 3.58505, 3.59134, 3.59757, 3.60373, 3.60984, 3.6159, 3.62192, 3.62791,
    #                      3.63387, 3.6398, 3.64571, 3.6516, 3.65749, 3.75811, 3.7639, 3.7697, 3.77549, 3.7813, 3.78712,
    #                      3.79296, 3.79882, 3.80472, 3.81065, 3.81662, 3.82265, 3.82874, 3.8349, 3.84114, 3.84747, 3.85391,
    #                      3.86047, 3.86718, 3.87404, 3.88108, 3.88832, 3.89579, 3.90353, 3.91157, 3.91994, 3.92868, 3.93784,
    #                      3.94746, 3.95758, 3.96823, 3.97945, 3.99123, 4.00355, 4.01639, 4.02969, 4.04339, 4.05801,
    #                      4.07212, 4.08535, 4.09777, 4.10947, 4.1205, 4.13093, 4.14081]
    print("\n")
    hefesto_dir = home_dir[0] + "/" + hefestodir
    output_folder = home_dir[0] + "/{}_Buoyancy_Outputs".format(runname)
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)
    else:
        pass
    os.mkdir(output_folder)
    bsp_and_morb_dir = []  # BSP dir at index 0, MORB dir at index 1
    for i in os.listdir(hefesto_dir):
        if "BSP" in str(i):
            bsp_and_morb_dir.append(str(hefesto_dir + "/" + i + "/fort.58"))
        elif "MORB" in str(i):
            bsp_and_morb_dir.append(str(hefesto_dir + "/" + i + "/fort.58"))
    if len(bsp_and_morb_dir) != 2:
        print("\n[X] The directory '{}' is not formatted properly!".format(hefesto_dir))
        time.sleep(2)
        initialization()
    else:
        print("\n[~] Found BSP HeFESTo File directory: '{}'!".format(bsp_and_morb_dir[0]))
        print("[~] Found MORB HeFESTo File directory: '{}'!".format(bsp_and_morb_dir[1]))
    if "{}_Integrated_Values.csv".format(runname) in os.listdir(home_dir[0]):
        os.remove("{}_Integrated_Values.csv".format(runname))
    integrated_output_file = open("{}_Integrated_Values.csv".format(runname), 'a')
    integrated_output_file.write("Star,Net Buoyant Force,{}".format(",".join(str(i) for i in depth_trans_zone)))
    print("\n[~] Initiating HeFESTo output file parsing...")
    # planet_grav = (6.674*10**-11) * (planet_mass / planet_radius**2)
    for i in os.listdir(bsp_and_morb_dir[0]):
        star_name = i.replace("fort.58.control.", "").replace("_fort.58", "").replace("_bsp.txt_bsp", "").replace(
            "fort.58_", "").replace("_fort58", "")
        try:
            for z in os.listdir(bsp_and_morb_dir[1]):
                starname_morb = z.replace("fort.58.control.", "").replace("fort.58_", "").replace(
                    "_morb.txt_morb", "").replace("_fort.58", "").replace("_fort58", "")
                if star_name == starname_morb:
                    print("\n\n[~] Matched BSP and MORB files for star: {}".format(star_name))
                    os.chdir(bsp_and_morb_dir[0])
                    with open(i, 'r') as bsp_infile:
                        os.chdir(bsp_and_morb_dir[1])
                        with open(z, 'r') as morb_infile:
                            bsp_readfile = pd.read_fwf(bsp_infile, colspecs='infer')
                            morb_readfile = pd.read_fwf(morb_infile, colspecs='infer')
                            bsp_df = bsp_readfile.iloc[:, [1, 3]]
                            morb_df = morb_readfile.iloc[:, [1, 3]]
                            depths = []
                            bsp_rho = []
                            morb_rho = []
                            morb_minus_bsp_rho = []
                            integrated_values = []
                            for y in bsp_df['depth']:
                                depths.append(float(y))
                            for y in bsp_df['rho']:
                                bsp_rho.append(float(y))
                            for y in morb_df['rho']:
                                morb_rho.append(float(y))
                            bsp_infile.close()
                            morb_infile.close()
                            cur_index = 0
                            for q in morb_rho:
                                corresponding_bsp = bsp_rho[cur_index]
                                morb_minus_bsp_rho.append(corresponding_bsp - q)
                                # morb_minus_bsp_rho.append(q - corresponding_bsp)
                                cur_index += 1
                            # print("\nDEPTHS")
                            # print(depths)
                            # print("\nBSPRHO")
                            # print(bsp_rho)
                            # print("\nMORBRHO")
                            # print(morb_rho)
                            # print("\nDELTARHO")
                            # print(morb_minus_bsp_rho)
                            for t in range(len(morb_minus_bsp_rho) - 1):
                                x = depths[:(t + 2)]
                                y = morb_minus_bsp_rho[:(t + 2)]
                                # integrated_values.append(inte.simps(y, x))
                                integrated_values.append(
                                    (inte.simps(y, x) * 1000 * 1000 * plate_thickness * gravity))
                                # Multiply by 1000 to account for g/cm^3 -> kg/m^3, and by
                                # 1000 again for depth km -> m.
                            # print("\nINTEVALS")
                            # print(integrated_values)
                            print("[~] Calculated a net buoyancy force of {} for star {}!".format(
                                integrated_values[-1], star_name))
                            os.chdir(home_dir[0])
                            integrated_vals_formatted = ",".join(str(i) for i in integrated_values)
                            integrated_output_file.write("\n{},{},{}".format(star_name,
                                                                             str(integrated_values[-1]),
                                                                             integrated_vals_formatted))
        except:
            integrated_output_file.write("\n{},{}".format(star_name, "FAILURE"))
            print("[X] Failed to calculate a net buoyancy force for star {}!".format(star_name))
    integrated_output_file.close()
    print("\n[~] Net buoyant force output file '{}' available in '{}'!".format(
        "{}_Integrated_Values.csv".format(runname), home_dir[0]))
    # Reconstructed hand-off: the original source ended with a stray module-level
    # decideplot() call (see the note below decideplot); prompting here matches
    # the apparent intended flow.
    decideplot(runname)


def visualize_outputs(integrated_output_file, runname):
    os.chdir(home_dir[0])
    print("\n[~] Preparing to plot integrated buoyancy force results...")
    if os.path.exists("{}_Buoyancy_Force_Graphs".format(runname)):
        shutil.rmtree("{}_Buoyancy_Force_Graphs".format(runname))
    os.mkdir("{}_Buoyancy_Force_Graphs".format(runname))
    loop_num = 1
    integrated_output_file_df = pd.read_csv(integrated_output_file)
    for row in integrated_output_file_df.index:
        try:
            integrated_buoyant_vals = []
            star_name = integrated_output_file_df['Star'][row]
            print("\n[~] Plotting integrated buoyancy force results for star: {}".format(star_name))
            if "{}.png".format(star_name) in os.listdir(home_dir[0]):
                os.remove(home_dir[0] + "/{}_Buoyancy_Force_Graphs/{}.png".format(runname, star_name))
            buoyant_force = integrated_output_file_df['Net Buoyant Force'][row]
            with open(integrated_output_file, 'r') as inte_output:
                reader = csv.reader(inte_output)
                for i, row in enumerate(reader):
                    if i == loop_num:
                        for z in row[2:]:
                            integrated_buoyant_vals.append(float(z))
                loop_num += 1
            inte_output.close()
            plt.plot(depth_trans_zone[1:], integrated_buoyant_vals)
            plt.title("{} Net Buoyant Forces".format(star_name))
            plt.xlabel("Depth (km)")
            plt.ylabel("Buoyant Force (N/m)")
            plt.xlim(0, 574)
            plt.grid()
            plt.savefig("{}.png".format(star_name), format='png')
            plt.close()
            fdir = home_dir[0] + "/{}.png".format(star_name)
            tdir = home_dir[0] + "/{}_Buoyancy_Force_Graphs/{}.png".format(runname, star_name)
            shutil.move(fdir, tdir)
            print("[~] Buoyant force plot for star {} available in directory '{}'!".format(star_name, tdir))
        except:
            print("[X] Failed to build a plot for star {}!".format(star_name))
    print("\n[~] Thank you for using the Exoplanet Pocketknife!\n[~] Returning to main menu...")
    time.sleep(2)
    initialization()


def decideplot(runname):
    # Reconstructed: the original defined this with no parameters and referenced a
    # module-level 'runname' that was never defined; it is parameterized here.
    print("\n[~] Would you like to graph the integrated buoyancy force results?\n"
          "Please enter 'y' or 'n' for 'yes' or 'no', respectively")
    plot_input = raw_input(">>> ")
    if plot_input == 'y':
        visualize_outputs(integrated_output_file="{}_Integrated_Values.csv".format(runname), runname=runname)
    elif plot_input == 'n':
        print("\n[~] Thank you for using the Exoplanet Pocketknife!\nReturning to the main menu...")
        time.sleep(2)
        initialization()
    else:
        print("\n[X] Oops! That's not a valid command!")
        time.sleep(2)
        decideplot(runname)

# Note: the original source called decideplot() here at module level, which would
# prompt at import time and crash on the undefined 'runname'; that stray call is
# omitted, and integrationloop2() invokes decideplot(runname) instead.
al 2017).\nPlease refer to the article and " "the documentation for more information.\n" "\n*Any use of this software or the methods described in Unterborn et al. 2017 requires proper" " citation.*\n\n") # if "Star2Oxide_Output.csv" in os.listdir(os.getcwd()): # os.remove("Star2Oxide_Output.csv") # else: # pass # outputfile = open("Star2Oxide_Output.csv", 'a') # time.sleep(1) print("Enter:\n" "'1' to raw_input [X/H] stellar abundances\n" "'2' to raw_input stellar mole abundances\n" "'3' to launch HeFESTo calculations\n" "'4' to perform buoyancy force calculations & visualize\n" "'o' for more options\n" "'e' to exit the Exoplanet Pocketknife\n") option1 = str(raw_input(">>> ")) if option1 == '1': if "run_alphamelts.command" in os.listdir(os.getcwd()): print("\nPlease enter your .csv formatted raw_input file with [X/H] stellar abundances:") infile = str(raw_input(">>> ")) if infile in os.listdir(os.getcwd()): print("\n[~] {} has been found in the working directory!".format(infile)) inputfile_list.append(infile) # time.sleep(1) logep(infile, infile_type='BSP', consol_file=False, init_path=(os.getcwd()), library=True) else: print("\n{} has NOT been found in the working directory!".format(infile)) initialization() else: print("\n[X] 'run_alphamelts.command' is not in the working directory!") time.sleep(2) initialization() elif option1 == '2': print("\nPlease enter your .csv formatted raw_input file with stellar mole abundances:") infile = str(raw_input(">>> ")) if "run_alphamelts.command" in os.listdir(os.getcwd()): if infile in os.listdir(os.getcwd()): print("\n[~] {} has been found in the working directory!".format(infile)) inputfile_list.append(infile) # time.sleep(1) molepct(infile, infile_type='BSP', consol_file=False, init_path=(os.getcwd()) ,library=True) else: print("\n{} has NOT been found in the working directory!".format(infile)) initialization() else: print("\n[X] 'run_alphamelts.command' is not in the working directory!") time.sleep(2) initialization() elif option1 == "3": print("Please enter the name of the HeFESTo cumulative input file directory") option3 = str(raw_input(">>> ")) print("What would you like to name this run?") option4 = str(raw_input(">>> ")) if os.path.exists(home_dir[0] + "/{}".format(option3)): runhefesto(infiledir=option3, actual_run=True, runname=option4) else: print("\n[X] '{}' does not exist in working directory: " "'{}'!".format((home_dir[0] + "/{}".format(option3)), home_dir[0])) time.sleep(2) pass elif option1 == "4": print("\nPlease enter the name of your HeFESTo Output File directory...") option5 = raw_input(">>> ") if not os.path.exists(option5): print("That directory does not exist in the working directory!") time.sleep(2) initialization() realform_dir = home_dir[0] + "/" + option5 # if len(os.listdir(realform_dir)) != 2: # print("\n[X] Warning! The HeFESTo directory '{}' is not properly formatted! (Length != 2, but is length {})".format(realform_dir, len(os.listdir(realform_dir)))) # for i in os.listdir(realform_dir): # print(i) # time.sleep(2) # initialization() print("What would you like to name this run?") option6 = raw_input(">>> ") integrationloop2(hefestodir=option5, runname=option6) elif option1 == 'o': print("\nPlease enter the letter of your choice. 
Would you like to: \n'a' Write a single file with MELTS raw_inputs\n" "'b' Write a library of MELTS raw_input files\n'c' Write a library of HeFESTo raw input files\n" "'d' Go back\n") raw_input_help = raw_input(">>> ") if raw_input_help == 'a': print("\nEnter '1' to raw_input [X/H] stellar abundances or '2' to raw_input stellar mole abundances.") raw_input_help2 = str(raw_input(">>> ")) if raw_input_help2 == "1": print("\nPlease enter your .csv formatted raw_input file with [X/H] stellar abundances:") infile = str(raw_input(">>> ")) if infile in os.listdir(os.getcwd()): print("\n[~] {} has been found in the working directory!".format(infile)) inputfile_list.append(infile) # time.sleep(1) logep(infile, infile_type='file', consol_file=True, init_path=(os.getcwd()), library=False) else: print("{} has NOT been found in the working directory!\n".format(infile)) time.sleep(1) initialization() elif raw_input_help2 == "2": print("\nPlease enter your .csv formatted raw_input file with stellar mole abundances:") infile = str(raw_input(">>> ")) if infile in os.listdir(os.getcwd()): print("\n[~] {} has been found in the working directory!".format(infile)) inputfile_list.append(infile) # time.sleep(1) molepct(infile, infile_type='file', consol_file=True, init_path=(os.getcwd()), library=False) else: print("\n{} has NOT been found in the working directory!".format(infile)) initialization() else: print("\n[X] Oops! That's not a valid command!\n") time.sleep(1) initialization() elif raw_input_help == 'b': print("\nEnter '1' to raw_input [X/H] stellar abundances or '2' to raw_input stellar mole abundances.") raw_input_help2 = str(raw_input(">>> ")) if raw_input_help2 == "1": print("\nPlease enter your .csv formatted raw_input file with [X/H] stellar abundances:") infile = raw_input(">>> ") if infile in os.listdir(os.getcwd()): print("\n[~] {} has been found in the working directory!".format(infile)) inputfile_list.append(infile) # time.sleep(1) logep(infile, infile_type='file', consol_file=False, init_path=(os.getcwd()), library=True) else: print("{} has NOT been found in the working directory!\n".format(infile)) time.sleep(1) initialization() elif raw_input_help2 == "2": print("\nPlease enter your .csv formatted raw_input file with stellar mole abundances:") infile = str(raw_input(">>> ")) if infile in os.listdir(os.getcwd()): print("\n[~] {} has been found in the working directory!".format(infile)) inputfile_list.append(infile) # time.sleep(1) molepct(infile, infile_type='file', consol_file=False, init_path=(os.getcwd()), library=True) else: print("\n{} has NOT been found in the working directory!".format(infile)) initialization() else: print("\n[X] Oops! That's not a valid command!\n") time.sleep(1) initialization() elif raw_input_help == 'c': runhefesto(actual_run=False) elif raw_input_help == 'd': initialization() else: print("\n[X] Oops! That's not a valid command!\n") time.sleep(1) initialization() elif option1 == 'e': print("\nThank you for using the Exoplanet Pocketknife!\n") print("\n___________________________________________\n") sys.exit() else: print("\n[X] Oops! {} is not a valid command!\n".format(option1)) time.sleep(1) initialization() initialization()
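The buoyancy loop above integrates the BSP-minus-MORB density contrast with Simpson's rule and rescales the units inline. A minimal standalone sketch of just that step, with an invented density-contrast profile and assumed plate_thickness and gravity values (the full script defines those elsewhere):

# Standalone sketch of the integration step above; the profile numbers are
# invented, and plate_thickness/gravity stand in for values the full script
# defines elsewhere. Uses the same (deprecated) scipy simps as the record.
import scipy.integrate as inte

depths = [0.0, 25.0, 50.0, 75.0, 100.0]      # km
delta_rho = [0.25, 0.20, 0.12, 0.05, -0.02]  # g/cm^3, BSP minus MORB
plate_thickness = 100.0                      # assumed value
gravity = 9.8                                # m/s^2, assumed value
# x1000 for g/cm^3 -> kg/m^3 and x1000 for km -> m, per the record's comment
net_force = inte.simps(delta_rho, depths) * 1000 * 1000 * plate_thickness * gravity
print(net_force)  # net buoyant force, N/m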
46.7689
207
0.541234
11,946
97,747
4.141972
0.093253
0.016734
0.020049
0.005497
0.655032
0.597534
0.547757
0.512409
0.478779
0.445938
0
0.061092
0.32932
97,747
2,089
208
46.791288
0.693671
0.121917
0
0.455182
0
0.023109
0.195014
0.073073
0
0
0
0
0
1
0.014706
false
0.035714
0.005602
0
0.022409
0.07493
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9500f8ddc8a192d5b326bf23ad973aa2e9a8109b
4,074
py
Python
tools/extract_observable.py
pauxy-qmc/pauxy
1da80284284769b59361c73cfa3c2d914c74a73f
[ "Apache-2.0" ]
16
2020-08-05T17:17:17.000Z
2022-03-18T04:06:18.000Z
tools/extract_observable.py
pauxy-qmc/pauxy
1da80284284769b59361c73cfa3c2d914c74a73f
[ "Apache-2.0" ]
4
2020-05-17T21:28:20.000Z
2021-04-22T18:05:50.000Z
tools/extract_observable.py
pauxy-qmc/pauxy
1da80284284769b59361c73cfa3c2d914c74a73f
[ "Apache-2.0" ]
5
2020-05-18T01:03:18.000Z
2021-04-13T15:36:29.000Z
#!/usr/bin/env python
'''Extract element of green's function'''

import argparse
import sys
import numpy
import os
import pandas as pd
import json

_script_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(_script_dir, 'analysis'))
import matplotlib.pyplot as plt
# from pauxy.analysis.extraction import analysed_itcf
# from pauxy.analysis.extraction import analysed_energies, extract_hdf5_simple
from pauxy.analysis.extraction import (
        extract_mixed_estimates,
        get_metadata
        )
import matplotlib.pyplot as pl


def parse_args(args):
    """Parse command-line arguments.

    Parameters
    ----------
    args : list of strings
        command-line arguments.

    Returns
    -------
    options : :class:`argparse.ArgumentParser`
        Command line arguments.
    """

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-s', '--spin', type=str, dest='spin',
                        default=None, help='Spin component to extract.'
                        ' Options: up/down')
    parser.add_argument('-t', '--type', type=str, dest='type',
                        default=None, help='Type of green\'s function to extract.'
                        ' Options: lesser/greater')
    parser.add_argument('-k', '--kspace', dest='kspace', action='store_true',
                        default=False, help='Extract kspace green\'s function.')
    parser.add_argument('-e', '--elements',
                        type=lambda s: [int(item) for item in s.split(',')],
                        dest='elements', default=None,
                        help='Element to extract.')
    parser.add_argument('-o', '--observable', type=str, dest='obs',
                        default='None', help='Data to extract')
    parser.add_argument('-p', '--plot-energy', action='store_true', dest='plot',
                        default=False, help='Plot energy trace.')
    parser.add_argument('-f', nargs='+', dest='filename',
                        help='Space-separated list of files to analyse.')

    options = parser.parse_args(args)

    if not options.filename:
        parser.print_help()
        sys.exit(1)

    return options


def main(args):
    """Extract observable from analysed output.

    Parameters
    ----------
    args : list of strings
        command-line arguments.

    Returns
    -------
    results : :class:`pandas.DataFrame`
        Analysed results.
    """

    options = parse_args(args)
    print_index = False
    if options.obs == 'itcf':
        results = analysed_itcf(options.filename[0], options.elements,
                                options.spin, options.type, options.kspace)
    elif options.obs == 'energy':
        results = analysed_energies(options.filename[0], 'mixed')
    elif options.obs == 'back_propagated':
        results = analysed_energies(options.filename[0], 'back_propagated')
    elif 'correlation' in options.obs:
        ctype = options.obs.replace('_correlation', '')
        results = correlation_function(options.filename[0], ctype, options.elements)
        print_index = True
    elif options.plot:
        data = extract_mixed_estimates(options.filename[0])
        md = get_metadata(options.filename[0])
        fp = md['propagators']['free_projection']
        dt = md['qmc']['dt']
        mc = md['qmc']['nsteps']
        data = data[abs(data.Weight) > 0.0]
        tau = numpy.arange(0, len(data)) * mc * dt
        if fp:
            pl.plot(tau, numpy.real(data.ENumer/data.EDenom))
            pl.xlabel(r"$\tau$ (au)")
            pl.ylabel(r"Energy (au)")
            pl.show()
        else:
            pl.plot(tau, data[options.obs].real)
            pl.xlabel(r"$\tau$ (au)")
            pl.ylabel(r"{} (au)".format(options.obs))
            pl.show()
    else:
        print('Unknown observable')

    if not options.plot:
        print(results.to_string())
        results.to_csv("%s" % options.obs)


if __name__ == '__main__':
    main(sys.argv[1:])
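One detail worth calling out in the record above is the -e/--elements option, which uses a lambda as the argparse type to turn a comma-separated string into a list of ints. A self-contained sketch of just that pattern:

# Isolated demonstration of the comma-separated-list argparse pattern used by
# the record's -e/--elements flag; everything here is standalone.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-e', '--elements',
                    type=lambda s: [int(item) for item in s.split(',')],
                    default=None)
print(parser.parse_args(['-e', '0,1,3']).elements)  # -> [0, 1, 3]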
33.393443
82
0.579774
462
4,074
4.993506
0.333333
0.034677
0.051582
0.035111
0.172952
0.136108
0.066753
0.066753
0.046814
0
0
0.0041
0.281541
4,074
121
83
33.669421
0.784079
0.138684
0
0.075
0
0
0.148961
0
0
0
0
0
0
1
0.025
false
0
0.1125
0
0.15
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
950130b7d174e4ab134e14783a96e2c70ef6e914
12,854
py
Python
datasets.py
shivakanthsujit/FMMRNet
12742398e3b981938a69e44b3f37d285904929b4
[ "MIT" ]
null
null
null
datasets.py
shivakanthsujit/FMMRNet
12742398e3b981938a69e44b3f37d285904929b4
[ "MIT" ]
null
null
null
datasets.py
shivakanthsujit/FMMRNet
12742398e3b981938a69e44b3f37d285904929b4
[ "MIT" ]
null
null
null
import glob
import os

import albumentations as A
import kaggle
import numpy as np
import PIL
import pytorch_lightning as pl
import torch
from albumentations.pytorch import ToTensorV2
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader

from utils import show_images


def get_train_transforms(input_size=256):
    return A.Compose(
        [
            A.RandomCrop(input_size, input_size),
            A.HorizontalFlip(),
            A.VerticalFlip(),
            A.OneOf(
                [
                    A.HueSaturationValue(
                        hue_shift_limit=0.2,
                        sat_shift_limit=0.2,
                        val_shift_limit=0.2,
                        p=0.9,
                    ),
                    A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.15, p=0.9),
                ],
                p=0.9,
            ),
            A.ToFloat(255),
            ToTensorV2(),
        ],
        additional_targets={"image1": "image"},
    )


def get_valid_transforms(input_size=256):
    return A.Compose(
        [A.CenterCrop(input_size, input_size), A.ToFloat(255), ToTensorV2()],
        additional_targets={"image1": "image"},
    )


train_transform = get_train_transforms()
valid_transform = get_valid_transforms()

BATCH_SIZE = 4
SEED = 42
NUM_WORKERS = 4

kaggle.api.authenticate()


class BaseDataModule(pl.LightningDataModule):
    def __init__(self, batch_size=BATCH_SIZE, seed=SEED, num_workers=NUM_WORKERS, on_gpu=True):
        super().__init__()
        self.batch_size = batch_size
        self.seed = seed
        self.num_workers = num_workers
        self.on_gpu = on_gpu

    def show_sample(self, split="train"):
        assert split in ["train", "val", "test"], f"Invalid {split}"
        if hasattr(self, f"{split}_data"):
            loader = getattr(self, f"{split}_loader")()
            print(f"No. of batches in {split}: ", len(loader))
            x, y, z = next(iter(loader))
            show_images(torch.cat((x, y, z)))
        else:
            print(f"Split {split} not found")

    def train_dataloader(self):
        return DataLoader(
            self.train_data,
            shuffle=True,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.on_gpu,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_data,
            shuffle=False,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.on_gpu,
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_data,
            shuffle=False,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.on_gpu,
        )


def split_dataset(data, frac, seed):
    assert isinstance(frac, float) and frac <= 1.0 and frac >= 0.0, f"Invalid fraction {frac}"
    train_split = int(len(data) * frac)
    val_split = len(data) - train_split
    return random_split(data, [train_split, val_split], generator=torch.Generator().manual_seed(seed))


class JRDR(torch.utils.data.Dataset):
    def __init__(self, root, type="Light", split="train", transform=train_transform):
        self.root = root
        self.data_dir = os.path.join(self.root, "rain_data_" + split + "_" + type)
        if type == "Heavy" or split == "test":
            self.rain_dir = os.path.join(self.data_dir, "rain/X2")
        else:
            self.rain_dir = os.path.join(self.data_dir, "rain")
        self.norain_dir = os.path.join(self.data_dir, "norain")
        self.files = glob.glob(self.rain_dir + "/*.*")
        if len(self.files) == 0:
            raise RuntimeError("Dataset not found.")
        self.transform = transform

    def get_file_name(self, idx):
        img1 = self.files[idx]
        _, img2 = os.path.split(img1)
        img2 = img2.split("x2")[0] + ".png"
        img2 = os.path.join(self.norain_dir, img2)
        return img1, img2

    def __getitem__(self, idx):
        img1, img2 = self.get_file_name(idx)
        rain_img = PIL.Image.open(img1)
        norain_img = PIL.Image.open(img2)
        if self.transform is not None:
            rain_img, norain_img = np.array(rain_img), np.array(norain_img)
            aug = self.transform(image=rain_img, image1=norain_img)
            rain_img, norain_img = aug["image"], aug["image1"]
        return rain_img, norain_img, rain_img - norain_img

    def __len__(self):
        return len(glob.glob(self.norain_dir + "/*.*"))


class JRDRDataModule(BaseDataModule):
    """
    JRDR DataModule for PyTorch-Lightning
    Learn more at https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
    """

    def __init__(
        self,
        data_dir="data/",
        dataset_type="Light",
        train_transform=train_transform,
        valid_transform=valid_transform,
        batch_size=BATCH_SIZE,
        seed=SEED,
        num_workers=NUM_WORKERS,
        on_gpu=True,
    ):
        super().__init__(batch_size=batch_size, seed=seed, num_workers=num_workers, on_gpu=on_gpu)
        self.data_dir = data_dir
        self.train_transform = train_transform
        self.valid_transform = valid_transform
        self.type = dataset_type

    def prepare_data(self):
        dataset_dir = os.path.join(self.data_dir, "JRDR")
        if not os.path.exists(dataset_dir):
            kaggle.api.dataset_download_files("shivakanthsujit/jrdr-deraining-dataset", path=self.data_dir, unzip=True)

    def setup(self, stage):
        dataset_dir = os.path.join(self.data_dir, "JRDR")
        data = JRDR(root=dataset_dir, type=self.type, split="train", transform=self.train_transform)
        self.train_data, self.val_data = split_dataset(data, 0.8, self.seed)
        self.test_data = JRDR(root=dataset_dir, type=self.type, split="test", transform=self.valid_transform)


class li_cvpr(torch.utils.data.Dataset):
    def __init__(self, root, transform=valid_transform):
        self.root = root
        self.rain_files = sorted(glob.glob(self.root + "/*in.png"))
        self.norain_files = sorted(glob.glob(self.root + "/*GT.png"))
        if len(self.rain_files) == 0 or len(self.norain_files) == 0:
            raise RuntimeError("Dataset not found.")
        self.transform = transform

    def get_file_name(self, idx):
        img1 = self.rain_files[idx]
        img2 = self.norain_files[idx]
        return img1, img2

    def __getitem__(self, idx):
        img1, img2 = self.get_file_name(idx)
        rain_img = PIL.Image.open(img1)
        norain_img = PIL.Image.open(img2)
        if self.transform is not None:
            rain_img, norain_img = np.array(rain_img), np.array(norain_img)
            aug = self.transform(image=rain_img, image1=norain_img)
            rain_img, norain_img = aug["image"], aug["image1"]
        return rain_img, norain_img, rain_img - norain_img

    def __len__(self):
        return len(self.rain_files)


class Rain12DataModule(BaseDataModule):
    """
    Rain12 DataModule for PyTorch-Lightning
    Learn more at https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
    """

    def __init__(
        self,
        data_dir="data/",
        train_transform=train_transform,
        valid_transform=valid_transform,
        batch_size=BATCH_SIZE,
        seed=SEED,
        num_workers=NUM_WORKERS,
        on_gpu=True,
    ):
        super().__init__(batch_size=batch_size, seed=seed, num_workers=num_workers, on_gpu=on_gpu)
        self.data_dir = data_dir
        self.train_transform = train_transform
        self.valid_transform = valid_transform

    def prepare_data(self):
        kaggle.api.dataset_download_files("shivakanthsujit/li-cvpr-dataset", path=self.data_dir, unzip=True)

    def setup(self, stage):
        dataset_dir = os.path.join(self.data_dir, "Rain12")
        if stage == "fit" or stage is None:
            data = li_cvpr(root=dataset_dir, transform=self.train_transform)
            self.train_data, self.val_data = split_dataset(data, 0.8, self.seed)
        if stage == "test" or stage is None:
            self.test_data = li_cvpr(root=dataset_dir, transform=self.valid_transform)


class IDGAN(torch.utils.data.Dataset):
    def __init__(self, root, split="train", syn=True, transform=train_transform):
        self.root = root
        self.data_dir = os.path.join(self.root, "rain")
        if split == "test":
            self.rain_dir = os.path.join(self.data_dir, "test_syn")
        else:
            self.rain_dir = os.path.join(self.data_dir, "training")
        self.norain_dir = self.rain_dir
        self.files = glob.glob(self.rain_dir + "/*.*")
        if len(self.files) == 0:
            raise RuntimeError("Dataset not found.")
        self.transform = transform

    def get_file_name(self, idx):
        img1 = self.files[idx]
        _, img2 = os.path.split(img1)
        img2 = img2.split("x2")[0] + ".png"
        img2 = os.path.join(self.norain_dir, img2)
        return img1, img2

    def __getitem__(self, idx):
        img1 = self.files[idx]
        im = PIL.Image.open(img1)
        w, h = im.size
        norain_img = im.crop((0, 0, w // 2, h))
        norain_img = np.array(norain_img)
        rain_img = im.crop((w // 2, 0, w, h))
        rain_img = np.array(rain_img)
        if self.transform is not None:
            rain_img, norain_img = np.array(rain_img), np.array(norain_img)
            aug = self.transform(image=rain_img, image1=norain_img)
            rain_img, norain_img = aug["image"], aug["image1"]
        return rain_img, norain_img, rain_img - norain_img

    def __len__(self):
        return len(glob.glob(self.norain_dir + "/*.*"))


class IDCGANDataModule(BaseDataModule):
    """
    IDCGAN DataModule for PyTorch-Lightning
    Learn more at https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html
    """

    def __init__(
        self,
        data_dir="data/",
        syn=True,
        train_transform=train_transform,
        valid_transform=valid_transform,
        batch_size=BATCH_SIZE,
        seed=SEED,
        num_workers=NUM_WORKERS,
        on_gpu=True,
    ):
        super().__init__(batch_size=batch_size, seed=seed, num_workers=num_workers, on_gpu=on_gpu)
        self.data_dir = data_dir
        self.train_transform = train_transform
        self.valid_transform = valid_transform
        self.syn = syn

    def prepare_data(self):
        kaggle.api.dataset_download_files("shivakanthsujit/idgan-dataset", path=self.data_dir, unzip=True)

    def setup(self, stage):
        dataset_dir = os.path.join(self.data_dir, "IDGAN")
        if stage == "fit" or stage is None:
            data = IDGAN(root=dataset_dir, syn=self.syn, transform=self.train_transform)
            self.train_data, self.val_data = split_dataset(data, 0.8, self.seed)
        if stage == "test" or stage is None:
            self.test_data = IDGAN(root=dataset_dir, syn=self.syn, split="test", transform=self.valid_transform)


def get_train_valid_loader(
    train_data,
    valid_data,
    batch_size=4,
    valid_size=0.1,
    show_sample=False,
    num_workers=NUM_WORKERS,
    pin_memory=False,
    shuffle=True,
    seed=SEED,
):
    error_msg = "[!] valid_size should be in the range [0, 1]."
    assert (valid_size >= 0) and (valid_size <= 1), error_msg

    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    if shuffle:
        np.random.seed(seed)
        np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    train_dataset = torch.utils.data.Subset(train_data, train_idx)
    valid_dataset = torch.utils.data.Subset(valid_data, valid_idx)

    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    valid_loader = DataLoader(
        valid_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    print("Training Batches: ", len(train_loader))
    print("Validation Batches: ", len(valid_loader))

    # visualize some images
    if show_sample:
        x, y, z = next(iter(train_loader))
        show_images(torch.cat((x, y, z)))
        x, y, z = next(iter(valid_loader))
        show_images(torch.cat((x, y, z)))

    return train_loader, valid_loader


def get_test_loader(test_data, batch_size=1, shuffle=False, num_workers=NUM_WORKERS, pin_memory=False):
    test_loader = DataLoader(
        test_data,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle,
        pin_memory=pin_memory,
    )
    print("Testing Batches: ", len(test_loader))
    return test_loader
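All of the datasets above rely on albumentations' additional_targets={"image1": "image"} so the rainy and clean images receive identical random crops and flips. A minimal sketch of that mechanism on random stand-in arrays (not real dataset images):

# Minimal sketch of the paired-image pattern used above: additional_targets
# makes the same random crop/flip apply to both images in one call.
# The arrays are random stand-ins, not dataset samples.
import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensorV2

transform = A.Compose(
    [A.RandomCrop(64, 64), A.HorizontalFlip(), A.ToFloat(255), ToTensorV2()],
    additional_targets={"image1": "image"},
)
rain = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
clean = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
aug = transform(image=rain, image1=clean)
print(aug["image"].shape, aug["image1"].shape)  # identical crops: (3, 64, 64)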
33.300518
119
0.628131
1,688
12,854
4.531398
0.115521
0.043143
0.028762
0.033991
0.691463
0.64649
0.619427
0.61629
0.532357
0.517061
0
0.011941
0.257274
12,854
385
120
33.387013
0.789253
0.032597
0
0.51634
0
0
0.050416
0.007918
0
0
0
0
0.009804
1
0.101307
false
0
0.039216
0.026144
0.218954
0.01634
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9505115c9cbc7843483152234defea7c4da55e5d
663
py
Python
29_Tree/Step03/wowo0709.py
StudyForCoding/BEAKJOON
84e1c5e463255e919ccf6b6a782978c205420dbf
[ "MIT" ]
null
null
null
29_Tree/Step03/wowo0709.py
StudyForCoding/BEAKJOON
84e1c5e463255e919ccf6b6a782978c205420dbf
[ "MIT" ]
3
2020-11-04T05:38:53.000Z
2021-03-02T02:15:19.000Z
29_Tree/Step03/wowo0709.py
StudyForCoding/BEAKJOON
84e1c5e463255e919ccf6b6a782978c205420dbf
[ "MIT" ]
null
null
null
import sys
input = sys.stdin.readline
from collections import deque

def bfs(v):
    dp = [-1 for _ in range(V+1)]
    dp[v] = 0
    q = deque()
    q.append(v)
    while q:
        cv = q.popleft()
        for nc, nv in tree[cv]:
            if dp[nv] == -1:  # if not yet visited,
                dp[nv] = dp[cv] + nc
                q.append(nv)
    return dp

# main
V = int(input())
tree = [[] for _ in range(V+1)]  # differs from problem 1167 only in the input format
for _ in range(V-1):
    a, b, c = map(int, input().split())
    tree[a].append((c, b))
    tree[b].append((c, a))
ds = bfs(1)            # distances from an arbitrary vertex
v = ds.index(max(ds))  # find the vertex at maximum distance
print(max(bfs(v)))     # maximum distance from that vertex
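This record is the standard two-pass BFS algorithm for the diameter of a weighted tree: a traversal from any vertex reaches one endpoint of the diameter, and a second traversal from that endpoint measures the diameter itself. A self-contained check on a tiny hand-made tree (the edge list is ours, not from the record):

# Standalone check of the double-BFS idea above on a small weighted tree.
from collections import deque

def dists(adj, src):
    d = {src: 0}
    q = deque([src])
    while q:
        u = q.popleft()
        for w, v in adj[u]:
            if v not in d:
                d[v] = d[u] + w
                q.append(v)
    return d

# path graph 1-2-3-4 with edge weights 3, 5, 2
adj = {1: [(3, 2)], 2: [(3, 1), (5, 3)], 3: [(5, 2), (2, 4)], 4: [(2, 3)]}
d1 = dists(adj, 1)
far = max(d1, key=d1.get)             # one endpoint of the diameter
print(max(dists(adj, far).values()))  # -> 10, the diameter (3 + 5 + 2)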
24.555556
43
0.517345
117
663
2.905983
0.470085
0.044118
0.088235
0.097059
0.105882
0
0
0
0
0
0
0.024176
0.313725
663
27
44
24.555556
0.723077
0.131222
0
0
0
0
0
0
0
0
0
0
0
1
0.041667
false
0
0.083333
0
0.166667
0.041667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
95086bdd5bed5808e0d9ba240d94e656c6d84fab
1,624
py
Python
_scripts/pandoc_wiki_filter.py
BenjaminPollak/coursebook
4646102b5f4c3d283885ba1b221da71a5e509eeb
[ "CC-BY-3.0", "CC-BY-4.0" ]
null
null
null
_scripts/pandoc_wiki_filter.py
BenjaminPollak/coursebook
4646102b5f4c3d283885ba1b221da71a5e509eeb
[ "CC-BY-3.0", "CC-BY-4.0" ]
null
null
null
_scripts/pandoc_wiki_filter.py
BenjaminPollak/coursebook
4646102b5f4c3d283885ba1b221da71a5e509eeb
[ "CC-BY-3.0", "CC-BY-4.0" ]
null
null
null
#!/usr/bin/env python3

"""
Pandoc filter to change each relative URL to absolute
"""

from panflute import run_filter, Str, Header, Image, Math, Link, RawInline
import sys
import re

base_raw_url = 'https://raw.githubusercontent.com/illinois-cs241/coursebook/master/'


class NoAltTagException(Exception):
    pass


def change_base_url(elem, doc):
    if type(elem) == Image:
        # Get the number of chars for the alt tag
        alt_length = len(elem._content)
        # No alt means no compile
        # Accessibility by default
        if alt_length == 0:
            raise NoAltTagException(elem.url)
        # Otherwise link to the raw user content instead of the relative path
        # That way the wiki and the site will have valid links automagically
        elem.url = base_raw_url + elem.url
        return elem
    if isinstance(elem, Math):
        # Raw inline math so jekyll renders it
        content = elem.text
        escaped = "$$ {} $$".format(content)
        return RawInline(escaped)
    if isinstance(elem, Link):
        # Transform all Links into <a> tags.
        # Reason being, github and jekyll are inconsistent about
        # leaving html as-is versus parsing it as markdown,
        # so we change everything to avoid ambiguity.
        # There is a script injection possibility here, so be careful.
        url = elem.url
        title = str(elem.title)
        if title == "":
            title = elem.url
        link = '<a href="{}">{}</a>'.format(url, title)
        return RawInline(link)


def main(doc=None):
    return run_filter(change_base_url, doc=doc)


if __name__ == "__main__":
    main()
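Since the record above is a panflute filter, one way to exercise change_base_url without a full pandoc pipeline is to walk a parsed document in-process. A hedged sketch, assuming panflute and pandoc are installed and the record's file is importable as pandoc_wiki_filter (the sample markdown is ours):

# Hypothetical test harness for the filter above; assumes panflute + pandoc
# are installed and the record's file is on the import path.
import panflute as pf
from pandoc_wiki_filter import change_base_url

md = "![diagram](images/fork.png) and a [link](https://example.com)"
doc = pf.convert_text(md, input_format='markdown', standalone=True)
doc = doc.walk(change_base_url)  # apply the filter action to every element
print(pf.convert_text(doc, input_format='panflute',
                      output_format='markdown'))  # inspect the rewritten output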
28.491228
84
0.640394
216
1,624
4.717593
0.546296
0.034347
0.019627
0
0
0
0
0
0
0
0
0.004237
0.273399
1,624
56
85
29
0.859322
0.343596
0
0
0
0
0.097421
0
0
0
0
0
0
1
0.071429
false
0.035714
0.107143
0.035714
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9508ac69c9c25e71d33441ccd8a681ec504ce33e
8,793
py
Python
PA_multiagent_game/multiagent_utils.py
salesforce/RIRL
6f137955bfbe2054be18bb2b15d0e6aedb972b06
[ "BSD-3-Clause" ]
null
null
null
PA_multiagent_game/multiagent_utils.py
salesforce/RIRL
6f137955bfbe2054be18bb2b15d0e6aedb972b06
[ "BSD-3-Clause" ]
null
null
null
PA_multiagent_game/multiagent_utils.py
salesforce/RIRL
6f137955bfbe2054be18bb2b15d0e6aedb972b06
[ "BSD-3-Clause" ]
null
null
null
#
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#

import sys
import glob

sys.path.insert(0, '..')

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import tqdm
import torch
from torch.distributions import Categorical
from IPython import display

from agents.soft_q import SoftQAgent
from multi_channel_RI import MCCPERDPAgent


######### General #######################################################

def smooth_plot(values, window=100):
    assert window >= 1
    n = len(values)
    if n < 2:
        return values
    elif n < window:
        window = int(np.floor(n / 2))
    else:
        window = int(window)
    cs_values = np.cumsum(values)
    smooth_values = (cs_values[window:] - cs_values[:-window]) / window
    smooth_xs = np.arange(len(smooth_values)) + (window / 2)
    return smooth_xs, smooth_values


##### Training Function ##########################################################

def train(principal_pol, agent_pol, env, n_iters=500, hist=None,
          train_principal=True, train_agent=True,
          normalize_t=False, normalize_n_a=False, plot=True, **kwargs):
    # train_principal and train_agent arguments used if want to stagger training
    assert isinstance(principal_pol, MCCPERDPAgent)
    if isinstance(agent_pol, SoftQAgent):
        agent_arch_type = 'SQA'
    elif isinstance(agent_pol, MCCPERDPAgent):
        agent_arch_type = 'RIA'
    else:
        raise NotImplementedError("Agent type not implemented")

    n_agents = env.n_agents
    # horizon = env.horizon

    # only add things to history if we are training both the principal and agent
    if train_principal and train_agent:
        if hist is None:
            hist = {'r_a': [], 'r_p': [],
                    'ext_r_a': [], 'ext_r_p': [],
                    'mi': [],
                    'ep_r_a': [], 'ep_r_p': [],
                    'ext_ep_r_a': [], 'ext_ep_r_p': []}

    iter_vals = range(n_iters)
    if not plot:
        iter_vals = tqdm.tqdm(iter_vals)

    for _ in iter_vals:
        p_state = env.reset()
        horizon = env.horizon
        a_states = None
        r_a = None
        a_actions = None
        done = False

        # Principal and Agent Rewards
        rs_seq_a = []
        rs_seq_p = []

        # Principal and Agent EXTRINSIC Rewards
        ext_rs_seq_a = []
        ext_rs_seq_p = []

        principal_pol.new_episode()
        agent_pol.new_episode()

        while not done:
            # Step the principal policy
            p_actions, total_p_mi_costs = principal_pol.act(p_state)
            next_a_states = env.principal_step(p_actions)

            # Store stuff in the agent buffer, if appropriate
            if train_agent:
                if (a_states is not None) and (agent_arch_type == 'SQA'):
                    agent_pol.batch_add_experience(a_states, a_actions, r_a,
                                                   next_a_state=next_a_states, done=False)
            a_states = next_a_states

            # Step the agent policy
            if (agent_arch_type == 'SQA'):
                _, a_actions = agent_pol.act(a_states)
                a_actions = a_actions.detach().numpy()
                total_a_mi_costs = 0
            else:
                a_actions, total_a_mi_costs = agent_pol.act(a_states)

            (r_as, r_p, r_a), p_state, done = env.agent_step(a_actions)
            # r_as is a 2d array of rewards [agent1 rewards, agent2 rewards, ... agentn rewards],
            # while r_a is one long array of length batch_size * n_agents.
            # r_a = np.concatenate(r_as) and r_as = r_a.reshape(n_agents, batch_size).T

            ext_r_a = np.array(r_a)
            ext_r_p = np.array(r_p)

            # Add mi costs
            r_a -= total_a_mi_costs
            r_p -= total_p_mi_costs

            # Normalize if applicable
            if normalize_t:
                r_a = r_a / env.horizon
                r_p = r_p / env.horizon
                ext_r_a = ext_r_a / env.horizon
                ext_r_p = ext_r_p / env.horizon
            if normalize_n_a:
                r_p = r_p / float(n_agents)
                ext_r_p = ext_r_p / float(n_agents)

            # Accumulate rewards
            rs_seq_a.append(r_a)
            rs_seq_p.append(r_p)
            ext_rs_seq_a.append(ext_r_a)
            ext_rs_seq_p.append(ext_r_p)

        # The game just ended, so we need to...
        #### TRAIN AGENT ####
        if train_agent:
            if agent_arch_type == 'SQA':
                agent_pol.batch_add_experience(a_states, a_actions, r_a,
                                               next_a_state=None, done=True)
                _ = agent_pol.train()
            else:
                _ = agent_pol.end_episode(rs_seq_a)

        #### TRAIN PRINCIPAL ####
        if train_principal:
            _ = principal_pol.end_episode(rs_seq_p)

        # Log things for visualization
        if train_principal and train_agent:
            avg_rs_a = np.stack(rs_seq_a).mean(1)
            hist['r_a'].append(avg_rs_a)
            avg_rs_p = np.stack(rs_seq_p).mean(1)
            hist['r_p'].append(avg_rs_p)
            avg_ext_rs_a = np.stack(ext_rs_seq_a).mean(1)
            hist['ext_r_a'].append(avg_ext_rs_a)
            avg_ext_rs_p = np.stack(ext_rs_seq_p).mean(1)
            hist['ext_r_p'].append(avg_ext_rs_p)
            hist['ep_r_a'].append(np.sum(avg_rs_a))
            hist['ep_r_p'].append(np.sum(avg_rs_p))
            hist['ext_ep_r_a'].append(np.sum(avg_ext_rs_a))
            hist['ext_ep_r_p'].append(np.sum(avg_ext_rs_p))

            channel_mis = principal_pol.get_mis_channels()
            for channel_name, mi_val in channel_mis:
                if channel_name not in hist:
                    hist[channel_name] = {}
                if env.horizon not in hist[channel_name]:
                    hist[channel_name][env.horizon] = []
                hist[channel_name][env.horizon].append(mi_val)

    return hist


##### Plotting the History ##########################################################

def plot_hist_signaling_vary_h(hist, axes=None, plot_smoothed_only=False):
    matplotlib.rcParams['image.aspect'] = 'auto'
    matplotlib.rcParams['image.interpolation'] = 'none'
    if axes is None:
        _, axes = plt.subplots(2, 4, figsize=(16, 8))
    (ax0, ax1, ax2, ax3) = axes[0]
    (ax4, ax5, ax6, ax7) = axes[1]
    for subax in axes:
        for ax in subax:
            ax.cla()

    total_ra = hist['ep_r_a']
    total_rp = hist['ep_r_p']
    total_ext_ra = hist['ext_ep_r_a']
    total_ext_rp = hist['ext_ep_r_p']

    if not plot_smoothed_only:
        ax0.plot(total_ra, color='b', alpha=0.2)
        ax0.plot(total_ext_ra, color='r', alpha=0.2)
    ax0.plot(*smooth_plot(total_ra, window=100), color='b')
    ax0.plot(*smooth_plot(total_ext_ra, window=100), color='r')
    ax0.grid(b=True)

    if not plot_smoothed_only:
        ax4.plot(total_rp, color='b', alpha=0.2)
        ax4.plot(total_ext_rp, color='r', alpha=0.2)
    ax4.plot(*smooth_plot(total_rp, window=100), color='b')
    ax4.plot(*smooth_plot(total_ext_rp, window=100), color='r')
    ax4.grid(b=True)

    max_h = max(hist['mi-last_effort'].keys())
    min_h = min(hist['mi-last_effort'].keys())

    ax1.imshow(np.array(hist['mi-last_effort'][min_h]), vmin=0, vmax=2.5)
    ax2.imshow(np.array(hist['mi-last_individual_outputs'][min_h]), vmin=0, vmax=2.5)
    ax3.imshow(np.array(hist['mi-last_wage_hours_output_time'][min_h]), vmin=0, vmax=2.5)

    ax0.set_title('Agent Reward')
    ax4.set_title('Principal Reward (includes MI cost)')
    ax1.set_title(f'MI: Effort {min_h}')
    ax2.set_title(f'MI: Individual Outputs {min_h}')
    ax3.set_title(f'MI: Others {min_h}')

    ax5.imshow(np.array(hist['mi-last_effort'][max_h]), vmin=0, vmax=2.5)
    ax6.imshow(np.array(hist['mi-last_individual_outputs'][max_h]), vmin=0, vmax=2.5)
    ax7.imshow(np.array(hist['mi-last_wage_hours_output_time'][max_h]), vmin=0, vmax=2.5)

    ax5.set_title(f'MI: Effort {max_h}')
    ax6.set_title(f'MI: Individual Outputs {max_h}')
    ax7.set_title(f'MI: Others {max_h}')
#

###### Function for naming savefiles #########################################

def get_savestr_allh(folder, principal_effort_mi_cost, principal_output_mi_cost,
                     normalize_t, *args, **kwargs):
    effort_name = f'mipe{principal_effort_mi_cost:.2f}'
    output_name = f'mipe{principal_output_mi_cost:.2f}'
    normalize_name = f'nt{int(normalize_t)}'
    return f'{folder}/{effort_name}_{normalize_name}_{output_name}'
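smooth_plot above computes a running mean with a cumulative-sum trick rather than a convolution: for window w, output element i is the mean of the w values starting at index i+1. A quick standalone check of that arithmetic (the sample values are ours):

# Standalone check of the cumulative-sum moving average used by smooth_plot.
import numpy as np

values, w = [1, 2, 3, 4, 5], 2
cs = np.cumsum(values)
print((cs[w:] - cs[:-w]) / w)  # -> [2.5 3.5 4.5]: means of (2,3), (3,4), (4,5)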
35.313253
230
0.585579
1,270
8,793
3.750394
0.196063
0.010917
0.008398
0.021415
0.282385
0.152425
0.109595
0.063405
0.046609
0.046609
0
0.016365
0.277266
8,793
248
231
35.455645
0.733124
0.113158
0
0.07362
0
0
0.095213
0.030984
0
0
0
0
0.01227
1
0.02454
false
0
0.067485
0
0.116564
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
950dcd67a7917370bcc5ec2201e9aaf688e1aa85
2,062
py
Python
postgres/python-asyncio/main.py
Gelbpunkt/idlebench
fe370f9fa6335cf738a91ca818638aedf0cf1ba3
[ "Apache-2.0" ]
null
null
null
postgres/python-asyncio/main.py
Gelbpunkt/idlebench
fe370f9fa6335cf738a91ca818638aedf0cf1ba3
[ "Apache-2.0" ]
null
null
null
postgres/python-asyncio/main.py
Gelbpunkt/idlebench
fe370f9fa6335cf738a91ca818638aedf0cf1ba3
[ "Apache-2.0" ]
4
2020-08-16T22:23:42.000Z
2020-08-17T20:15:33.000Z
import asyncio

import asyncpg

VALUES = [
    356091260429402122,
    "Why are you reading",
    9164,
    6000000,
    14,
    0,
    0,
    0,
    463318425901596672,
    "https://i.imgur.com/LRV2QCK.png",
    15306,
    ["Paragon", "White Sorcerer"],
    0,
    0,
    647,
    "Leader",
    None,
    0,
    "10.0",
    "10.0",
    30,
    2,
    1,
    0,
    0,
    "1.0",
    None,
    0,
    "Elf",
    2,
    2,
    0,
    0,
    0,
    {"red": 255, "green": 255, "blue": 255, "alpha": 0.8},
]
VALUES_100 = [VALUES for _ in range(100)]


async def main():
    conn = await asyncpg.connect(
        user="postgres", password="postgres", database="postgres", host="127.0.0.1"
    )
    for i in range(1_000):
        await conn.executemany(
            'INSERT INTO public.profile ("user", "name", "money", "xp", "pvpwins",'
            ' "money_booster", "time_booster", "luck_booster", "marriage",'
            ' "background", "guild", "class", "deaths", "completed", "lovescore",'
            ' "guildrank", "backgrounds", "puzzles", "atkmultiply", "defmultiply",'
            ' "crates_common", "crates_uncommon", "crates_rare", "crates_magic",'
            ' "crates_legendary", "luck", "god", "favor", "race", "cv", "reset_points",'
            ' "chocolates", "trickortreat", "eastereggs", "colour") VALUES ($1, $2, $3,'
            " $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19,"
            " $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33,"
            " $34, $35);",
            VALUES_100,
        )
        await conn.fetchrow(
            'SELECT * FROM public.profile WHERE "user"=356091260429402122;'
        )
        await conn.execute(
            'UPDATE public.profile SET "crates_common"="crates_common"+1,'
            ' "crates_uncommon"="crates_uncommon"+1 WHERE "user"=$1;',
            356091260429402122,
        )
        await conn.execute(
            'DELETE FROM public.profile WHERE "user"=356091260429402122;'
        )
    await conn.close()


asyncio.run(main())
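The benchmark above leans on executemany, which prepares the 35-parameter INSERT once and streams the 100-row argument list through it. A minimal sketch of the same pattern against a hypothetical demo table (connection details mirror the record; the table and columns are ours):

# Hedged sketch of asyncpg's positional-parameter batching; the "demo" table
# and its columns are made up, and a local PostgreSQL server is assumed.
import asyncio

import asyncpg


async def demo():
    conn = await asyncpg.connect(
        user="postgres", password="postgres", database="postgres", host="127.0.0.1"
    )
    rows = [(i, "name-{}".format(i)) for i in range(100)]
    # One prepared statement, many argument tuples:
    await conn.executemany('INSERT INTO demo ("id", "name") VALUES ($1, $2);', rows)
    await conn.close()


asyncio.run(demo())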
25.45679
88
0.511639
228
2,062
4.552632
0.587719
0.013487
0.078035
0.042389
0.102119
0.102119
0.102119
0.102119
0
0
0
0.16156
0.303589
2,062
80
89
25.775
0.561281
0
0
0.323944
0
0.056338
0.491271
0.059651
0
0
0
0
0
1
0
false
0.014085
0.028169
0
0.028169
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
950e90e9549308bcb8380f5876c0fc12c6f68485
1,112
py
Python
fv-courseware/exercise-01/counter_formal.py
DonaldKellett/nmigen-beginner
260ae76a5277e36ec9909aaf6b76acab320aed88
[ "MIT" ]
1
2020-11-09T13:34:02.000Z
2020-11-09T13:34:02.000Z
fv-courseware/exercise-01/counter_formal.py
DonaldKellett/nmigen-beginner
260ae76a5277e36ec9909aaf6b76acab320aed88
[ "MIT" ]
null
null
null
fv-courseware/exercise-01/counter_formal.py
DonaldKellett/nmigen-beginner
260ae76a5277e36ec9909aaf6b76acab320aed88
[ "MIT" ]
null
null
null
from nmigen import *
from nmigen.asserts import Assert
from nmigen.cli import main_parser, main_runner

__all__ = ["Counter"]

"""
Simple counter with formal verification
See slides 50-60 in https://zipcpu.com/tutorial/class-verilog.pdf
"""


class Counter(Elaboratable):
    def __init__(self, fv_mode=False):
        self.fv_mode = fv_mode
        self.i_start_signal = Signal(1, reset=0)
        self.counter = Signal(16)
        self.o_busy = Signal(1, reset=0)

    def ports(self):
        return [
            self.i_start_signal,
            self.counter,
            self.o_busy
        ]

    def elaborate(self, platform):
        m = Module()
        MAX_AMOUNT = Const(22)
        with m.If(self.i_start_signal & (self.counter == 0)):
            m.d.sync += self.counter.eq(MAX_AMOUNT - 1)
        with m.Elif(self.counter != 0):
            m.d.sync += self.counter.eq(self.counter - 1)
        m.d.comb += self.o_busy.eq(self.counter != 0)
        if self.fv_mode:
            m.d.comb += Assert(self.counter < MAX_AMOUNT)
        return m


if __name__ == "__main__":
    parser = main_parser()
    args = parser.parse_args()

    m = Module()
    m.submodules.counter = counter = Counter(True)

    main_runner(parser, args, m, ports=counter.ports())
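Besides the main_parser/main_runner CLI shown in the record, the same design can be converted to Verilog programmatically. A hedged sketch, assuming the nmigen 0.x backend API and that the record's file is importable as counter_formal:

# Hypothetical programmatic conversion of the Counter above; assumes the
# nmigen 0.x API (nmigen.back.verilog.convert) and that counter_formal.py
# is on the import path.
from nmigen.back import verilog

from counter_formal import Counter

counter = Counter(fv_mode=True)
print(verilog.convert(counter, ports=counter.ports()))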
25.272727
55
0.695144
175
1,112
4.205714
0.365714
0.134511
0.040761
0.065217
0.142663
0.142663
0.084239
0.084239
0.084239
0
0
0.01826
0.16277
1,112
44
56
25.272727
0.772288
0
0
0.060606
0
0
0.015
0
0
0
0
0
0.060606
1
0.090909
false
0
0.090909
0.030303
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9510db3851814a40d1e201c8697a846d403a09e9
731
py
Python
mnist/download.py
hiroog/cppapimnist
30d7e01954fc43da2eea5fe3ebf034b37e79cfd1
[ "MIT" ]
null
null
null
mnist/download.py
hiroog/cppapimnist
30d7e01954fc43da2eea5fe3ebf034b37e79cfd1
[ "MIT" ]
null
null
null
mnist/download.py
hiroog/cppapimnist
30d7e01954fc43da2eea5fe3ebf034b37e79cfd1
[ "MIT" ]
null
null
null
#!/usr/bin/python3

import urllib.request
import os
import gzip

DOWNLOAD_URL='http://yann.lecun.com/exdb/mnist/'

file_list=[
    'train-images-idx3-ubyte',
    'train-labels-idx1-ubyte',
    't10k-images-idx3-ubyte',
    't10k-labels-idx1-ubyte'
]

for name in file_list:
    if not os.path.exists( name ):
        gz_name= name + '.gz'
        if not os.path.exists( gz_name ):
            print( 'download', gz_name )
            with urllib.request.urlopen( DOWNLOAD_URL + gz_name ) as fi:
                with open( gz_name, 'wb' ) as fo:
                    fo.write( fi.read() )
        print( 'write', name )
        with gzip.open( gz_name, 'rb' ) as fi:
            with open( name, 'wb' ) as fo:
                fo.write( fi.read() )
30.458333
118
0.575923
104
731
3.951923
0.423077
0.087591
0.072993
0.053528
0.194647
0.111922
0.111922
0.111922
0
0
0
0.017078
0.27907
731
23
119
31.782609
0.762808
0.023256
0
0.117647
0
0
0.204225
0.126761
0
0
0
0
0
1
0
false
0
0.176471
0
0.176471
0.117647
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
951110f9319a47de447b38bde1aba4ab72ddd1bd
2,651
py
Python
arch/arm64/tests/a64_tbnz.py
Samsung/ADBI
3e424c45386b0a36c57211da819021cb1929775a
[ "Apache-2.0" ]
312
2016-02-04T11:03:17.000Z
2022-03-18T11:30:10.000Z
arch/arm64/tests/a64_tbnz.py
NickHardwood/ADBI
3e424c45386b0a36c57211da819021cb1929775a
[ "Apache-2.0" ]
4
2016-02-04T11:05:40.000Z
2017-07-27T04:22:27.000Z
arch/arm64/tests/a64_tbnz.py
NickHardwood/ADBI
3e424c45386b0a36c57211da819021cb1929775a
[ "Apache-2.0" ]
85
2016-02-04T12:48:30.000Z
2021-01-14T06:23:24.000Z
import random
from common import *


class test_a64_tbnz(TemplateTest):
    def gen_rand(self):
        regs = list(set(GPREGS) - {'x0', 'w0'})
        while True:
            yield {'insn': random.choice(['tbz', 'tbnz']),
                   'reg': random.choice(regs),
                   'bit': random.randint(0, 63),
                   'val': random.randint(0, 1),
                   'label_idx': random.randint(0, self.__label_count - 1)}

    def __init__(self):
        self.__label_count = 8
        self.symbols = [__name__ + '_addr_' + str(i) for i in xrange(self.__label_count)]
        randvals = random.sample(xrange(0, 0xfffffffffffffff), 2 * self.__label_count)
        self.branch = randvals[:self.__label_count]
        self.nobranch = randvals[self.__label_count:]

    def test_begin(self):
        yield ' .arch armv8-a'
        yield ' .align 2'
        yield ' .text'
        for i in xrange(0, len(self.symbols), 2):
            yield self.symbols[i] + ':'
            yield ' ldr\t\tx0, ={0}'.format(hex(self.branch[i]))
            yield ' ret'
            yield ' .skip %d' % random.randrange(512, 2048, 4)

    def gen_testcase(self, nr, insn, reg, bit, val, label_idx):
        label = self.symbols[label_idx]
        ret_label = self.testcase_name(nr) + '_ret'
        if reg.startswith('w'):
            v = random.randint(0, 0xffffffff)
            bit /= 2
        else:
            v = random.randint(0, 0xfffffffffffffff)
        if val == 1:
            v |= (0x1 << bit)
        else:
            v &= ~(0x1 << bit)
        state = ProcessorState(setreg={reg: v, 'x0': self.nobranch[label_idx], 'x30': ret_label},
                               reserve=['x0'])
        yield state.prepare()
        space = '\t' if insn == 'tbnz' else '\t\t'
        yield self.testcase_insn(nr, '{insn}{space}{reg}, #{bit}, {label}'.format(**locals()))
        yield ret_label + ':'
        if (insn == 'tbz' and val == 0) or (insn == 'tbnz' and val != 0):
            yield ' // should jump'
            x0 = self.branch[label_idx]
        else:
            yield ' // shouldn\'t jump'
            x0 = self.nobranch[label_idx]
        yield state.check({'x0': x0})
        yield state.restore()

    def test_end(self):
        for i in xrange(1, len(self.symbols), 2):
            yield ' .skip %d' % random.randrange(512, 2048, 4)
            yield self.symbols[i] + ':'
            yield ' ldr\t\tx0, ={0}'.format(hex(self.branch[i]))
            yield ' ret'
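For readers unfamiliar with the instructions under test: TBZ branches when the tested bit is zero and TBNZ branches when it is one, which is exactly the condition gen_testcase above arranges and then checks. A plain-Python restatement (standalone, not part of the test harness):

# Standalone restatement of the AArch64 branch condition exercised above.
def branch_taken(insn, value, bit):
    b = (value >> bit) & 1
    return b == 0 if insn == 'tbz' else b == 1

print(branch_taken('tbz', 0b1000, 3))   # False: bit 3 is set
print(branch_taken('tbnz', 0b1000, 3))  # True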
38.42029
94
0.488118
305
2,651
4.091803
0.311475
0.038462
0.067308
0.028846
0.205128
0.145833
0.145833
0.145833
0.092949
0.092949
0
0.036374
0.367409
2,651
68
95
38.985294
0.707812
0
0
0.183333
0
0
0.09619
0
0
0
0.018861
0
0
1
0.083333
false
0
0.033333
0
0.133333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0